X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Fdatapath.c;h=a34049f931383fd52478b182531fa815476261a8;hb=7d0ab001dbc7bd4285aaf1dbcb881312ec32608c;hp=73f734801760d3fc491c9f9160371322d26c1d8a;hpb=b4a7d61582ef3a09a666106a926c8912201dfe72;p=openvswitch

diff --git a/datapath/datapath.c b/datapath/datapath.c
index 73f73480..a34049f9 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -69,6 +69,23 @@ EXPORT_SYMBOL(dp_ioctl_hook);
 static struct datapath *dps[ODP_MAX];
 static DEFINE_MUTEX(dp_mutex);
 
+/* We limit the number of times that we pass into dp_process_received_packet()
+ * to avoid blowing out the stack in the event that we have a loop. */
+struct loop_counter {
+	int count;		/* Count. */
+	bool looping;		/* Loop detected? */
+};
+
+#define DP_MAX_LOOPS 5
+
+/* We use a separate counter for each CPU for both interrupt and non-interrupt
+ * context in order to keep the limit deterministic for a given packet. */
+struct percpu_loop_counters {
+	struct loop_counter counters[2];
+};
+
+static DEFINE_PER_CPU(struct percpu_loop_counters, dp_loop_counters);
+
 static int new_dp_port(struct datapath *, struct odp_port *, int port_no);
 
 /* Must be called with rcu_read_lock or dp_mutex. */
@@ -377,6 +394,7 @@ static int new_dp_port(struct datapath *dp, struct odp_port *odp_port, int port_
 
 	p->port_no = port_no;
 	p->dp = dp;
+	p->vport = vport;
 	atomic_set(&p->sflow_pool, 0);
 
 	err = vport_attach(vport, p);
@@ -510,6 +528,14 @@ out:
 	return err;
 }
 
+static void suppress_loop(struct datapath *dp, struct sw_flow_actions *actions)
+{
+	if (net_ratelimit())
+		printk(KERN_WARNING "%s: flow looped %d times, dropping\n",
+		       dp_name(dp), DP_MAX_LOOPS);
+	actions->n_actions = 0;
+}
+
 /* Must be called with rcu_read_lock. */
 void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
 {
@@ -518,34 +544,71 @@ void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
 	int stats_counter_off;
 	struct odp_flow_key key;
 	struct tbl_node *flow_node;
+	struct sw_flow *flow;
+	struct sw_flow_actions *acts;
+	struct loop_counter *loop;
+	int error;
 
 	OVS_CB(skb)->dp_port = p;
 
-	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
-		if (dp->drop_frags) {
-			kfree_skb(skb);
-			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
-			goto out;
-		}
+	/* Extract flow from 'skb' into 'key'. */
+	error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key);
+	if (unlikely(error)) {
+		kfree_skb(skb);
+		return;
 	}
 
+	if (OVS_CB(skb)->is_frag && dp->drop_frags) {
+		kfree_skb(skb);
+		stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
+		goto out;
+	}
+
+	/* Look up flow. */
 	flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp);
-	if (flow_node) {
-		struct sw_flow *flow = flow_cast(flow_node);
-		struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
-		flow_used(flow, skb);
-		execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
-				GFP_ATOMIC);
-		stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
-	} else {
-		stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+	if (unlikely(!flow_node)) {
 		dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+		stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+		goto out;
+	}
+
+	flow = flow_cast(flow_node);
+	flow_used(flow, skb);
+
+	acts = rcu_dereference(flow->sf_acts);
+
+	/* Check whether we've looped too much. */
+	loop = &get_cpu_var(dp_loop_counters).counters[!!in_interrupt()];
+	if (unlikely(++loop->count > DP_MAX_LOOPS))
+		loop->looping = true;
+	if (unlikely(loop->looping)) {
+		suppress_loop(dp, acts);
+		goto out_loop;
 	}
 
+	/* Execute actions. */
+	execute_actions(dp, skb, &key, acts->actions, acts->n_actions, GFP_ATOMIC);
+	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
+
+	/* Check whether sub-actions looped too much. */
+	if (unlikely(loop->looping))
+		suppress_loop(dp, acts);
+
+out_loop:
+	/* Decrement loop counter. */
+	if (!--loop->count)
+		loop->looping = false;
+	put_cpu_var(dp_loop_counters);
+
 out:
+	/* Update datapath statistics. */
 	local_bh_disable();
 	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+	write_seqcount_begin(&stats->seqlock);
 	(*(u64 *)((u8 *)stats + stats_counter_off))++;
+	write_seqcount_end(&stats->seqlock);
+
 	local_bh_enable();
 }
 
@@ -806,7 +869,11 @@ err_kfree_skb:
 err:
 	local_bh_disable();
 	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+	write_seqcount_begin(&stats->seqlock);
 	stats->n_lost++;
+	write_seqcount_end(&stats->seqlock);
+
 	local_bh_enable();
 
 	return err;
@@ -925,7 +992,7 @@ static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats,
 
 	stats->n_packets = flow->packet_count;
 	stats->n_bytes = flow->byte_count;
-	stats->ip_tos = flow->ip_tos;
+	stats->reserved = 0;
 	stats->tcp_flags = flow->tcp_flags;
 	stats->error = 0;
 }
@@ -934,7 +1001,6 @@ static void clear_stats(struct sw_flow *flow)
 {
 	flow->used = 0;
 	flow->tcp_flags = 0;
-	flow->ip_tos = 0;
 	flow->packet_count = 0;
 	flow->byte_count = 0;
 }
@@ -1299,7 +1365,9 @@ static int do_execute(struct datapath *dp, const struct odp_execute *execute)
 	else
 		skb->protocol = htons(ETH_P_802_2);
 
-	flow_extract(skb, execute->in_port, &key);
+	err = flow_extract(skb, execute->in_port, &key);
+	if (err)
+		goto error_free_skb;
 
 	rcu_read_lock();
 	err = execute_actions(dp, skb, &key, actions->actions,
@@ -1341,12 +1409,21 @@ static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
 	stats.max_groups = DP_MAX_GROUPS;
 	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
 	for_each_possible_cpu(i) {
-		const struct dp_stats_percpu *s;
-		s = per_cpu_ptr(dp->stats_percpu, i);
-		stats.n_frags += s->n_frags;
-		stats.n_hit += s->n_hit;
-		stats.n_missed += s->n_missed;
-		stats.n_lost += s->n_lost;
+		const struct dp_stats_percpu *percpu_stats;
+		struct dp_stats_percpu local_stats;
+		unsigned seqcount;
+
+		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
+		do {
+			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+			local_stats = *percpu_stats;
+		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+		stats.n_frags += local_stats.n_frags;
+		stats.n_hit += local_stats.n_hit;
+		stats.n_missed += local_stats.n_missed;
+		stats.n_lost += local_stats.n_lost;
 	}
 	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
 	stats.max_action_queue = DP_MAX_QUEUE_LEN;
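
The loop-suppression hunks above bound how many times dp_process_received_packet() may re-enter itself through a flow's actions: each entry bumps a per-CPU counter, exceeding DP_MAX_LOOPS sets a sticky "looping" flag and empties the offending flow's action list via suppress_loop(), and the counter is unwound on the way out so the next packet starts clean. The sketch below is a minimal userspace rendition of that shape, not the kernel code: process_packet(), forward() and MAX_LOOPS are made-up names, and a C11 _Thread_local counter stands in for the kernel's per-CPU counters.

/*
 * Userspace sketch of the bounded-recursion / loop-suppression pattern.
 * Illustrative only; not the openvswitch datapath code.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_LOOPS 5

struct loop_counter {
	int count;		/* Current nesting depth. */
	bool looping;		/* Depth limit exceeded? */
};

/* Thread-local stand-in for the kernel's per-CPU counters. */
static _Thread_local struct loop_counter loop_counter;

static void forward(int port);

/* Process one packet; may re-enter itself via forward() if the configured
 * actions send the packet back into the datapath. */
static void process_packet(int port)
{
	struct loop_counter *loop = &loop_counter;

	if (++loop->count > MAX_LOOPS)
		loop->looping = true;
	if (loop->looping) {
		fprintf(stderr, "packet looped %d times, dropping\n", MAX_LOOPS);
		goto out;
	}

	/* "Execute actions": forward to the next port, which in a
	 * misconfigured topology loops straight back to us. */
	forward(port + 1);

out:
	/* Unwind: when the outermost call returns, clear the looping flag
	 * so the next packet starts with a clean slate. */
	if (!--loop->count)
		loop->looping = false;
}

static void forward(int port)
{
	process_packet(port);	/* Simulates a loop in the flow table. */
}

int main(void)
{
	process_packet(0);
	return 0;
}

Keeping the flag set until the outermost frame unwinds is what lets the kernel's post-execution check ("sub-actions looped too much") suppress the whole flow rather than only the innermost recursion.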
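The statistics hunks switch the per-CPU counters from bare increments to updates bracketed by write_seqcount_begin()/write_seqcount_end(), with get_dp_stats() snapshotting each CPU's block inside a read_seqcount_begin()/read_seqcount_retry() loop, so a reader can no longer observe a half-updated 64-bit counter (e.g. on 32-bit machines). The following is a rough userspace C11 analogue of that odd/even sequence protocol: struct stats, stats_update() and stats_read() are illustrative names, a single writer per stats block is assumed (as with per-CPU data), and it is a sketch of the idea rather than the kernel's seqcount implementation.

/*
 * Userspace sketch of a seqcount-style protocol for reading counters that a
 * single writer updates concurrently.  Illustrative only; not the kernel API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	atomic_uint seq;		/* Odd while an update is in progress. */
	_Atomic uint64_t n_hit;
	_Atomic uint64_t n_missed;
};

static void stats_update(struct stats *s, bool hit)
{
	unsigned int seq = atomic_load_explicit(&s->seq, memory_order_relaxed);

	/* Begin: odd sequence tells readers an update is in flight. */
	atomic_store_explicit(&s->seq, seq + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);

	if (hit)
		atomic_fetch_add_explicit(&s->n_hit, 1, memory_order_relaxed);
	else
		atomic_fetch_add_explicit(&s->n_missed, 1, memory_order_relaxed);

	/* End: back to even; readers that raced with us will retry. */
	atomic_store_explicit(&s->seq, seq + 2, memory_order_release);
}

static void stats_read(struct stats *s, uint64_t *hit, uint64_t *missed)
{
	unsigned int begin, end;

	do {
		begin = atomic_load_explicit(&s->seq, memory_order_acquire);
		*hit = atomic_load_explicit(&s->n_hit, memory_order_relaxed);
		*missed = atomic_load_explicit(&s->n_missed, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		end = atomic_load_explicit(&s->seq, memory_order_relaxed);
		/* Retry if an update was in progress or completed meanwhile. */
	} while ((begin & 1) || begin != end);
}

int main(void)
{
	struct stats s = { 0 };
	uint64_t hit, missed;

	stats_update(&s, true);
	stats_update(&s, false);
	stats_read(&s, &hit, &missed);
	printf("hit=%llu missed=%llu\n",
	       (unsigned long long)hit, (unsigned long long)missed);
	return 0;
}

The reader never blocks the writer; it simply copies the counters again if the sequence was odd or changed underneath it, which matches the lock-free snapshot loop added to get_dp_stats().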