struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
int stats_counter_off;
- struct odp_flow_key key;
- struct tbl_node *flow_node;
- struct sw_flow *flow;
struct sw_flow_actions *acts;
struct loop_counter *loop;
int error;
OVS_CB(skb)->dp_port = p;
- /* Extract flow from 'skb' into 'key'. */
- error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key);
- if (unlikely(error)) {
- kfree_skb(skb);
- return;
- }
+ if (!OVS_CB(skb)->flow) {
+ struct odp_flow_key key;
+ struct tbl_node *flow_node;
- if (OVS_CB(skb)->is_frag && dp->drop_frags) {
- kfree_skb(skb);
- stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
- goto out;
- }
+ /* Extract flow from 'skb' into 'key'. */
+ error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key);
+ if (unlikely(error)) {
+ kfree_skb(skb);
+ return;
+ }
- /* Look up flow. */
- flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp);
- if (unlikely(!flow_node)) {
- dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
- stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
- goto out;
+ if (OVS_CB(skb)->is_frag && dp->drop_frags) {
+ kfree_skb(skb);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
+ goto out;
+ }
+
+ /* Look up flow. */
+ flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
+ flow_hash(&key), flow_cmp);
+ if (unlikely(!flow_node)) {
+ dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+ goto out;
+ }
+
+ OVS_CB(skb)->flow = flow_cast(flow_node);
}
- flow = flow_cast(flow_node);
- flow_used(flow, skb);
+ flow_used(OVS_CB(skb)->flow, skb);
- acts = rcu_dereference(flow->sf_acts);
+ acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
/* Check whether we've looped too much. */
loop = &get_cpu_var(dp_loop_counters).counters[!!in_interrupt()];
}
/* Execute actions. */
- execute_actions(dp, skb, &key, acts->actions, acts->n_actions, GFP_ATOMIC);
+ execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
+ acts->n_actions, GFP_ATOMIC);
stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
/* Check whether sub-actions looped too much. */
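Every exit from dp_process_received_packet() above funnels through an out: label that this excerpt elides: each path records which per-CPU counter it wants bumped (n_frags, n_missed, n_hit) as a byte offset in stats_counter_off, so the bookkeeping lives in one place and the cached-flow fast path pays for exactly one increment. A minimal sketch of that exit point, assuming the datapath keeps its counters in a per-CPU dp->stats_percpu block (that field name is an assumption; the offsetof(struct dp_stats_percpu, ...) selection is from the code above):

out:
	/* Sketch: bump the per-CPU counter chosen above by its byte
	 * offset; 'stats' is the variable declared at the top of the
	 * function. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	local_bh_enable();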
/**
* struct ovs_skb_cb - OVS data in skb CB
* @dp_port: The datapath port on which the skb entered the switch.
+ * @flow: The flow associated with this packet. May be %NULL if no flow
+ * has been looked up yet.
+ * @is_frag: %true if this packet is an IPv4 fragment, %false otherwise.
* @ip_summed: Consistently stores L4 checksumming status across different
* kernel versions.
* @tun_id: ID (in network byte order) of the tunnel that encapsulated this
* packet. It is 0 if the packet was not received on a tunnel.
- * @is_frag: %true if this packet is an IPv4 fragment, %false otherwise.
*/
struct ovs_skb_cb {
struct dp_port *dp_port;
+ struct sw_flow *flow;
+ bool is_frag;
enum csum_type ip_summed;
__be32 tun_id;
- bool is_frag;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
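OVS_CB() overlays struct ovs_skb_cb on skb->cb, the fixed 48-byte scratch area every sk_buff carries, so the new flow pointer eats into a hard budget shared with dp_port, is_frag, ip_summed, and tun_id. A compile-time guard of the following shape (not part of this patch; it would sit inside an init function) is a cheap way to catch the struct outgrowing that buffer:

	/* Sketch only: fail the build if ovs_skb_cb no longer fits in
	 * the sk_buff control block after adding 'flow'. */
	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
		     sizeof(((struct sk_buff *)0)->cb));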
skb_reset_mac_header(skb);
compute_ip_summed(skb, true);
+ OVS_CB(skb)->flow = NULL;
vport_receive(vport, skb);
struct vport_ops internal_vport_ops = {
.type = "internal",
- .flags = VPORT_F_REQUIRED | VPORT_F_GEN_STATS,
+ .flags = VPORT_F_REQUIRED | VPORT_F_GEN_STATS | VPORT_F_FLOW,
.create = internal_dev_create,
.destroy = internal_dev_destroy,
.attach = internal_dev_attach,
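The new VPORT_F_FLOW flag advertises that the internal vport manages OVS_CB(skb)->flow itself, which is why internal_dev_xmit() above clears the field explicitly before calling vport_receive(). For vports without the flag, the generic receive path presumably has to do that clearing, so dp_process_received_packet() never sees a stale pointer left over from a previous user of skb->cb. A sketch of how vport_receive() could honor the flag (its body is not shown in this excerpt, so the exact shape, including the vport_get_dp_port() helper, is an assumption):

void vport_receive(struct vport *vport, struct sk_buff *skb)
{
	struct dp_port *dp_port = vport_get_dp_port(vport);

	/* Vports that don't advertise VPORT_F_FLOW never touch the flow
	 * pointer, so reset it here; vports that do (like the internal
	 * device) are trusted to have set it, even if only to NULL. */
	if (!(vport->ops->flags & VPORT_F_FLOW))
		OVS_CB(skb)->flow = NULL;

	dp_process_received_packet(dp_port, skb);
}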