X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Fdatapath.c;h=390acc8a4e5f8a029fbf207b58c6eb45593aab3a;hb=3976f6d57b1134c5c3ed054c9da4aa6786fbf5bf;hp=e46819876b9caf11a8748230db675bb06f3dd6ea;hpb=55574bb0d21541c13fe67545a74448b36063e461;p=openvswitch

diff --git a/datapath/datapath.c b/datapath/datapath.c
index e4681987..390acc8a 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -8,6 +8,8 @@
 
 /* Functions for managing the dp interface/device. */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include
 #include
 #include
@@ -27,7 +29,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -531,8 +532,8 @@ out:
 static void suppress_loop(struct datapath *dp, struct sw_flow_actions *actions)
 {
 	if (net_ratelimit())
-		printk(KERN_WARNING "%s: flow looped %d times, dropping\n",
-		       dp_name(dp), DP_MAX_LOOPS);
+		pr_warn("%s: flow looped %d times, dropping\n",
+			dp_name(dp), DP_MAX_LOOPS);
 	actions->n_actions = 0;
 }
 
@@ -542,35 +543,44 @@ void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
 {
 	struct datapath *dp = p->dp;
 	struct dp_stats_percpu *stats;
 	int stats_counter_off;
-	struct odp_flow_key key;
-	struct tbl_node *flow_node;
-	struct sw_flow *flow;
 	struct sw_flow_actions *acts;
 	struct loop_counter *loop;
+	int error;
 
 	OVS_CB(skb)->dp_port = p;
 
-	/* Extract flow from 'skb' into 'key'. */
-	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
-		if (dp->drop_frags) {
+	if (!OVS_CB(skb)->flow) {
+		struct odp_flow_key key;
+		struct tbl_node *flow_node;
+
+		/* Extract flow from 'skb' into 'key'. */
+		error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key);
+		if (unlikely(error)) {
+			kfree_skb(skb);
+			return;
+		}
+
+		if (OVS_CB(skb)->is_frag && dp->drop_frags) {
 			kfree_skb(skb);
 			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
 			goto out;
 		}
-	}
 
-	/* Look up flow. */
-	flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp);
-	if (unlikely(!flow_node)) {
-		dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
-		stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
-		goto out;
+		/* Look up flow. */
+		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
+				       flow_hash(&key), flow_cmp);
+		if (unlikely(!flow_node)) {
+			dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+			goto out;
+		}
+
+		OVS_CB(skb)->flow = flow_cast(flow_node);
 	}
 
-	flow = flow_cast(flow_node);
-	flow_used(flow, skb);
+	flow_used(OVS_CB(skb)->flow, skb);
 
-	acts = rcu_dereference(flow->sf_acts);
+	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
 
 	/* Check whether we've looped too much. */
 	loop = &get_cpu_var(dp_loop_counters).counters[!!in_interrupt()];
@@ -582,7 +592,8 @@ void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
 	}
 
 	/* Execute actions. */
-	execute_actions(dp, skb, &key, acts->actions, acts->n_actions, GFP_ATOMIC);
+	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
+			acts->n_actions, GFP_ATOMIC);
 	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
 
 	/* Check whether sub-actions looped too much. */
@@ -599,7 +610,11 @@ out:
 	/* Update datapath statistics. */
 	local_bh_disable();
 	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+	write_seqcount_begin(&stats->seqlock);
 	(*(u64 *)((u8 *)stats + stats_counter_off))++;
+	write_seqcount_end(&stats->seqlock);
+
 	local_bh_enable();
 }
 
@@ -636,9 +651,9 @@ int vswitch_skb_checksum_setup(struct sk_buff *skb)
 		break;
 	default:
 		if (net_ratelimit())
-			printk(KERN_ERR "Attempting to checksum a non-"
-			       "TCP/UDP packet, dropping a protocol"
-			       " %d packet", iph->protocol);
+			pr_err("Attempting to checksum a non-TCP/UDP packet, "
+			       "dropping a protocol %d packet",
+			       iph->protocol);
 		goto out;
 	}
 
@@ -741,11 +756,10 @@ void compute_ip_summed(struct sk_buff *skb, bool xmit)
 		break;
#endif
 	default:
-		printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
-		       skb->ip_summed);
+		pr_err("unknown checksum type %d\n", skb->ip_summed);
 		/* None seems the safest... */
 		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
-	}
+	}
 
 #if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
 	/* Xen has a special way of representing CHECKSUM_PARTIAL on older
@@ -860,7 +874,11 @@ err_kfree_skb:
 err:
 	local_bh_disable();
 	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+	write_seqcount_begin(&stats->seqlock);
 	stats->n_lost++;
+	write_seqcount_end(&stats->seqlock);
+
 	local_bh_enable();
 
 	return err;
@@ -1036,12 +1054,12 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
 		}
 
 		/* Allocate flow. */
-		error = -ENOMEM;
-		flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
-		if (flow == NULL)
+		flow = flow_alloc();
+		if (IS_ERR(flow)) {
+			error = PTR_ERR(flow);
 			goto error;
+		}
 		flow->key = uf->flow.key;
-		spin_lock_init(&flow->lock);
 		clear_stats(flow);
 
 		/* Obtain actions. */
@@ -1096,7 +1114,8 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
 error_free_flow_acts:
 	kfree(flow->sf_acts);
 error_free_flow:
-	kmem_cache_free(flow_cache, flow);
+	flow->sf_acts = NULL;
+	flow_put(flow);
 error:
 	return error;
 }
@@ -1312,10 +1331,11 @@ static int do_execute(struct datapath *dp, const struct odp_execute *execute)
 	if (execute->length < ETH_HLEN || execute->length > 65535)
 		goto error;
 
-	err = -ENOMEM;
 	actions = flow_actions_alloc(execute->n_actions);
-	if (!actions)
+	if (IS_ERR(actions)) {
+		err = PTR_ERR(actions);
 		goto error;
+	}
 
 	err = -EFAULT;
 	if (copy_from_user(actions->actions, execute->actions,
@@ -1352,7 +1372,9 @@ static int do_execute(struct datapath *dp, const struct odp_execute *execute)
 	else
 		skb->protocol = htons(ETH_P_802_2);
 
-	flow_extract(skb, execute->in_port, &key);
+	err = flow_extract(skb, execute->in_port, &key);
+	if (err)
+		goto error_free_skb;
 
 	rcu_read_lock();
 	err = execute_actions(dp, skb, &key, actions->actions,
@@ -1394,12 +1416,21 @@ static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
 	stats.max_groups = DP_MAX_GROUPS;
 	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
 	for_each_possible_cpu(i) {
-		const struct dp_stats_percpu *s;
-		s = per_cpu_ptr(dp->stats_percpu, i);
-		stats.n_frags += s->n_frags;
-		stats.n_hit += s->n_hit;
-		stats.n_missed += s->n_missed;
-		stats.n_lost += s->n_lost;
+		const struct dp_stats_percpu *percpu_stats;
+		struct dp_stats_percpu local_stats;
+		unsigned seqcount;
+
+		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
+		do {
+			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+			local_stats = *percpu_stats;
+		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+		stats.n_frags += local_stats.n_frags;
+		stats.n_hit += local_stats.n_hit;
+		stats.n_missed += local_stats.n_missed;
+		stats.n_lost += local_stats.n_lost;
 	}
 	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
 	stats.max_action_queue = DP_MAX_QUEUE_LEN;
@@ -2259,7 +2290,7 @@ ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
 	}
 success:
 	copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
-	
+
 	retval = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		if (copy_bytes == skb->len) {
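
The per-CPU statistics hunks above all follow the same seqcount pattern: each
struct dp_stats_percpu gains a seqlock that the packet-processing paths bump
around their counter increments, and get_dp_stats() copies each CPU's counters
inside a read_seqcount_begin()/read_seqcount_retry() loop so it never sums a
torn update. A minimal standalone sketch of that pattern follows; the names
example_stats, example_stats_hit() and example_stats_sum_hits() are
hypothetical, and it assumes seqcount_init() was run on every CPU's seqlock
when the per-CPU area was allocated.

#include <linux/bottom_half.h>
#include <linux/percpu.h>
#include <linux/seqlock.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Hypothetical stand-in for struct dp_stats_percpu. */
struct example_stats {
	u64 n_hit;
	u64 n_missed;
	seqcount_t seqlock;
};

/* Writer: runs on the local CPU with bottom halves disabled, so only a
 * reader on another CPU can observe the counters mid-update. */
static void example_stats_hit(struct example_stats __percpu *stats)
{
	struct example_stats *s;

	local_bh_disable();
	s = per_cpu_ptr(stats, smp_processor_id());
	write_seqcount_begin(&s->seqlock);
	s->n_hit++;
	write_seqcount_end(&s->seqlock);
	local_bh_enable();
}

/* Reader: snapshot every CPU's counters, retrying any CPU whose writer
 * raced with the copy, then sum the consistent snapshots. */
static u64 example_stats_sum_hits(struct example_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct example_stats *s = per_cpu_ptr(stats, cpu);
		struct example_stats snapshot;
		unsigned seq;

		do {
			seq = read_seqcount_begin(&s->seqlock);
			snapshot = *s;
		} while (read_seqcount_retry(&s->seqlock, seq));

		total += snapshot.n_hit;
	}
	return total;
}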
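
The do_put_flow() and do_execute() hunks also switch from a NULL check with a
hard-coded -ENOMEM to the kernel's ERR_PTR convention, where the allocator
itself encodes the errno in the returned pointer. A sketch of that calling
convention with invented names (example_obj, example_alloc(), example_use());
the real flow_alloc() and flow_actions_alloc() live elsewhere in the datapath
code and may differ in detail.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj {
	spinlock_t lock;
	/* ... */
};

/* ERR_PTR-style constructor: a failure carries its errno inside the
 * returned pointer instead of collapsing every error to NULL. */
static struct example_obj *example_alloc(gfp_t gfp)
{
	struct example_obj *obj = kmalloc(sizeof(*obj), gfp);

	if (!obj)
		return ERR_PTR(-ENOMEM);
	spin_lock_init(&obj->lock);
	return obj;
}

/* Caller side, mirroring the new do_put_flow()/do_execute() error paths. */
static int example_use(void)
{
	struct example_obj *obj = example_alloc(GFP_KERNEL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);
	/* ... use obj ... */
	kfree(obj);
	return 0;
}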
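
Finally, the new pr_fmt() definition at the top of the file is what lets the
later hunks drop the hand-written "openvswitch: " prefix in compute_ip_summed():
every pr_warn()/pr_err() call in the file picks up the module name
automatically. A tiny illustration (example_report() is made up); the macro has
to be defined before the printk headers are pulled in, which is why the patch
places it ahead of the #include block.

/* Must precede the includes so it overrides the default pr_fmt(). */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example_report(int type)
{
	/* Expands to printk(KERN_ERR KBUILD_MODNAME ": unknown checksum type %d\n", type),
	 * i.e. the module-name prefix replaces the hand-written one. */
	pr_err("unknown checksum type %d\n", type);
}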