X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Fvport.c;h=6c8eb0845aec6dff26031cbabb1b329afa6789d1;hb=45ca68cbadc3697ac95c53939d2877c6c2a3782e;hp=712c26e5d3ddae51386433bc437df41573aa9e07;hpb=f9764f6e911ab3b034b3f390f8303ce1396a4dd8;p=openvswitch
diff --git a/datapath/vport.c b/datapath/vport.c
index 712c26e5..6c8eb084 100644
--- a/datapath/vport.c
+++ b/datapath/vport.c
@@ -6,6 +6,8 @@
  * kernel, by Linus Torvalds and others.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include 
 #include 
 #include 
@@ -16,6 +18,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "vport.h"
 #include "vport-internal_dev.h"
@@ -27,6 +30,9 @@ static struct vport_ops *base_vport_ops_list[] = {
 	&internal_vport_ops,
 	&patch_vport_ops,
 	&gre_vport_ops,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	&capwap_vport_ops,
+#endif
 };
 
 static const struct vport_ops **vport_ops_list;
@@ -35,17 +41,6 @@ static int n_vport_types;
 static struct hlist_head *dev_table;
 #define VPORT_HASH_BUCKETS 1024
 
-/* We limit the number of times that we pass through vport_send() to
- * avoid blowing out the stack in the event that we have a loop. There is
- * a separate counter for each CPU for both interrupt and non-interrupt
- * context in order to keep the limit deterministic for a given packet. */
-struct percpu_loop_counter {
-	int count[2];
-};
-
-static DEFINE_PER_CPU(struct percpu_loop_counter, vport_loop_counter);
-#define VPORT_MAX_LOOPS 5
-
 /* Both RTNL lock and vport_mutex need to be held when updating dev_table.
  *
  * If you use vport_locate and then perform some operations, you need to hold
@@ -83,13 +78,14 @@ void vport_unlock(void)
 	mutex_unlock(&vport_mutex);
 }
 
-#define ASSERT_VPORT() do { \
-	if (unlikely(!mutex_is_locked(&vport_mutex))) { \
-		printk(KERN_ERR "openvswitch: vport lock not held at %s (%d)\n", \
-			__FILE__, __LINE__); \
-		dump_stack(); \
-	} \
-} while(0)
+#define ASSERT_VPORT() \
+do { \
+	if (unlikely(!mutex_is_locked(&vport_mutex))) { \
+		pr_err("vport lock not held at %s (%d)\n", \
+		       __FILE__, __LINE__); \
+		dump_stack(); \
+	} \
+} while (0)
 
 /**
  * vport_init - initialize vport subsystem
@@ -623,7 +619,7 @@ struct vport *vport_locate(const char *name)
 	struct hlist_node *node;
 
 	if (unlikely(!mutex_is_locked(&vport_mutex) && !rtnl_is_locked())) {
-		printk(KERN_ERR "openvswitch: neither RTNL nor vport lock held in vport_locate\n");
+		pr_err("neither RTNL nor vport lock held in vport_locate\n");
 		dump_stack();
 	}
 
@@ -798,9 +794,6 @@ int vport_attach(struct vport *vport, struct dp_port *dp_port)
 {
 	ASSERT_RTNL();
 
-	if (dp_port->vport)
-		return -EBUSY;
-
 	if (vport_get_dp_port(vport))
 		return -EBUSY;
 
@@ -812,7 +805,6 @@ int vport_attach(struct vport *vport, struct dp_port *dp_port)
 			return err;
 	}
 
-	dp_port->vport = vport;
 	rcu_assign_pointer(vport->dp_port, dp_port);
 
 	return 0;
@@ -836,7 +828,6 @@ int vport_detach(struct vport *vport)
 	if (!dp_port)
 		return -EINVAL;
 
-	dp_port->vport = NULL;
 	rcu_assign_pointer(vport->dp_port, NULL);
 
 	if (vport->ops->detach)
@@ -1068,12 +1059,20 @@ int vport_get_stats(struct vport *vport, struct odp_vport_stats *stats)
 	for_each_possible_cpu(i) {
 		const struct vport_percpu_stats *percpu_stats;
+		struct vport_percpu_stats local_stats;
+		unsigned seqcount;
 
 		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
-		stats->rx_bytes += percpu_stats->rx_bytes;
-		stats->rx_packets += percpu_stats->rx_packets;
-		stats->tx_bytes += percpu_stats->tx_bytes;
-		stats->tx_packets += percpu_stats->tx_packets;
+
+		do {
+			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+			local_stats = *percpu_stats;
+		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+		stats->rx_bytes += local_stats.rx_bytes;
+		stats->rx_packets += local_stats.rx_packets;
+		stats->tx_bytes += local_stats.tx_bytes;
+		stats->tx_packets += local_stats.tx_packets;
 	}
 
 	err = 0;
@@ -1208,14 +1207,19 @@ void vport_receive(struct vport *vport, struct sk_buff *skb)
 		struct vport_percpu_stats *stats;
 
 		local_bh_disable();
 		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+		write_seqcount_begin(&stats->seqlock);
 		stats->rx_packets++;
 		stats->rx_bytes += skb->len;
+		write_seqcount_end(&stats->seqlock);
 
 		local_bh_enable();
 	}
 
+	if (!(vport->ops->flags & VPORT_F_FLOW))
+		OVS_CB(skb)->flow = NULL;
+
 	if (!(vport->ops->flags & VPORT_F_TUN_ID))
 		OVS_CB(skb)->tun_id = 0;
 
@@ -1243,25 +1247,15 @@ static inline unsigned packet_length(const struct sk_buff *skb)
  */
 int vport_send(struct vport *vport, struct sk_buff *skb)
 {
-	int *loop_count;
 	int mtu;
 	int sent;
 
-	loop_count = &get_cpu_var(vport_loop_counter).count[!!in_interrupt()];
-	(*loop_count)++;
-
-	if (unlikely(*loop_count > VPORT_MAX_LOOPS)) {
-		if (net_ratelimit())
-			printk(KERN_WARNING "%s: dropping packet that has looped more than %d times\n",
-			       dp_name(vport_get_dp_port(vport)->dp), VPORT_MAX_LOOPS);
-		goto error;
-	}
-
 	mtu = vport_get_mtu(vport);
 	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
 		if (net_ratelimit())
-			printk(KERN_WARNING "%s: dropped over-mtu packet: %d > %d\n",
-			       dp_name(vport_get_dp_port(vport)->dp), packet_length(skb), mtu);
+			pr_warn("%s: dropped over-mtu packet: %d > %d\n",
+				dp_name(vport_get_dp_port(vport)->dp),
+				packet_length(skb), mtu);
 		goto error;
 	}
 
@@ -1271,25 +1265,22 @@ int vport_send(struct vport *vport, struct sk_buff *skb)
 		struct vport_percpu_stats *stats;
 
 		local_bh_disable();
 		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+		write_seqcount_begin(&stats->seqlock);
 		stats->tx_packets++;
 		stats->tx_bytes += sent;
+		write_seqcount_end(&stats->seqlock);
 
 		local_bh_enable();
 	}
 
-	goto out;
+	return sent;
 
 error:
-	sent = 0;
 	kfree_skb(skb);
 	vport_record_error(vport, VPORT_E_TX_DROPPED);
 
-out:
-	(*loop_count)--;
-	put_cpu_var(vport_loop_counter);
-
-	return sent;
+	return 0;
 }
 
 /**
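A note on the stats changes above: the per-CPU counters gain a seqcount_t (the seqlock field), so vport_get_stats() can copy a consistent snapshot of the 64-bit counters without taking a lock, while the writers in vport_receive() and vport_send() only pay for two sequence bumps. Below is a minimal, self-contained sketch of that reader/writer pattern using the same <linux/seqlock.h> primitives; the demo_* names are illustrative only and are not part of this patch.

/* Sketch (not from the patch): per-CPU writers are already serialized
 * (one counter set per CPU, updated with bottom halves disabled), so a
 * bare seqcount_t is enough; readers retry until the sequence is stable. */
#include <linux/seqlock.h>
#include <linux/types.h>

struct demo_stats {		/* hypothetical stand-in for vport_percpu_stats */
	u64 packets;
	u64 bytes;
	seqcount_t seqlock;	/* initialize with seqcount_init() before use */
};

/* Writer side, as in vport_receive()/vport_send(). */
static void demo_update(struct demo_stats *s, unsigned int len)
{
	write_seqcount_begin(&s->seqlock);
	s->packets++;
	s->bytes += len;
	write_seqcount_end(&s->seqlock);
}

/* Reader side, as in vport_get_stats(): copy out a consistent snapshot. */
static void demo_snapshot(const struct demo_stats *s, struct demo_stats *snap)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&s->seqlock);
		*snap = *s;
	} while (read_seqcount_retry(&s->seqlock, seq));
}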