+/* Like odp_flow_key_to_flow(), this function converts the 'key_len' bytes of
+ * OVS_KEY_ATTR_* attributes in 'key' to a flow structure in 'flow' and returns
+ * an ODP_FIT_* value that indicates how well 'key' fits our expectations for
+ * what a flow key should contain.
+ *
+ * This function also includes some logic to help make VLAN splinters
+ * transparent to the rest of the upcall processing logic. In particular, if
+ * the extracted in_port is a VLAN splinter port, it replaces flow->in_port by
+ * the "real" port, sets flow->vlan_tci correctly for the VLAN of the VLAN
+ * splinter port, and pushes a VLAN header onto 'packet' (if it is nonnull).
+ *
+ * Sets '*initial_tci' to the VLAN TCI with which the packet was really
+ * received, that is, the actual VLAN TCI extracted by odp_flow_key_to_flow().
+ * (This differs from the value returned in flow->vlan_tci only for packets
+ * received on VLAN splinters.)
+ */
+static enum odp_key_fitness
+ofproto_dpif_extract_flow_key(const struct ofproto_dpif *ofproto,
+ const struct nlattr *key, size_t key_len,
+ struct flow *flow, ovs_be16 *initial_tci,
+ struct ofpbuf *packet)
+{
+ enum odp_key_fitness fitness;
+
+ fitness = odp_flow_key_to_flow(key, key_len, flow);
+ if (fitness == ODP_FIT_ERROR) {
+ return fitness;
+ }
+ /* Record the TCI exactly as extracted, before any VLAN splinter fixup. */
+ *initial_tci = flow->vlan_tci;
+
+ /* vsp_adjust_flow() returns true only if 'flow' arrived on a VLAN
+ * splinter port, in which case it has rewritten flow->in_port and
+ * flow->vlan_tci as described in the comment above. */
+ if (vsp_adjust_flow(ofproto, flow)) {
+ if (packet) {
+ /* Make the packet resemble the flow, so that it gets sent to an
+ * OpenFlow controller properly, so that it looks correct for
+ * sFlow, and so that flow_extract() will get the correct vlan_tci
+ * if it is called on 'packet'.
+ *
+ * The allocated space inside 'packet' probably also contains
+ * 'key', that is, both 'packet' and 'key' are probably part of a
+ * struct dpif_upcall (see the large comment on that structure
+ * definition), so pushing data on 'packet' is in general not a
+ * good idea since it could overwrite 'key' or free it as a side
+ * effect. However, it's OK in this special case because we know
+ * that 'packet' is inside a Netlink attribute: pushing 4 bytes
+ * will just overwrite the 4-byte "struct nlattr", which is fine
+ * since we don't need that header anymore. */
+ eth_push_vlan(packet, flow->vlan_tci);
+ }
+
+ /* Let the caller know that we can't reproduce 'key' from 'flow'. */
+ if (fitness == ODP_FIT_PERFECT) {
+ fitness = ODP_FIT_TOO_MUCH;
+ }
+ }
+
+ return fitness;
+}
+
+/* Handles the 'n_upcalls' flow-miss upcalls in 'upcalls': groups packets that
+ * share a flow into a single "flow_miss", translates each distinct flow once,
+ * and executes the resulting datapath operations as a single batch.
+ *
+ * NOTE(review): assumes n_upcalls <= FLOW_MISS_MAX_BATCH; handle_upcalls()
+ * guarantees this for the batches it passes in. */
+static void
+handle_miss_upcalls(struct ofproto_dpif *ofproto, struct dpif_upcall *upcalls,
+ size_t n_upcalls)
+{
+ struct dpif_upcall *upcall;
+ struct flow_miss *miss;
+ struct flow_miss misses[FLOW_MISS_MAX_BATCH];
+ struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
+ struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
+ struct hmap todo;
+ int n_misses;
+ size_t n_ops;
+ size_t i;
+
+ if (!n_upcalls) {
+ return;
+ }
+
+ /* Construct the to-do list.
+ *
+ * This just amounts to extracting the flow from each packet and sticking
+ * the packets that have the same flow in the same "flow_miss" structure so
+ * that we can process them together. */
+ hmap_init(&todo);
+ n_misses = 0;
+ for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
+ struct flow_miss *miss = &misses[n_misses];
+ struct flow_miss *existing_miss;
+ uint32_t hash;
+
+ /* Obtain metadata and check userspace/kernel agreement on flow match,
+ * then set 'flow''s header pointers. */
+ miss->key_fitness = ofproto_dpif_extract_flow_key(
+ ofproto, upcall->key, upcall->key_len,
+ &miss->flow, &miss->initial_tci, upcall->packet);
+ if (miss->key_fitness == ODP_FIT_ERROR) {
+ /* Unintelligible flow key from the datapath: skip this upcall. */
+ continue;
+ }
+ flow_extract(upcall->packet, miss->flow.skb_priority,
+ miss->flow.tun_id, miss->flow.in_port, &miss->flow);
+
+ /* Group this packet with any earlier packet that had the same flow;
+ * otherwise 'miss' becomes the canonical entry for this flow. */
+ hash = flow_hash(&miss->flow, 0);
+ existing_miss = flow_miss_find(&todo, &miss->flow, hash);
+ if (!existing_miss) {
+ hmap_insert(&todo, &miss->hmap_node, hash);
+ miss->key = upcall->key;
+ miss->key_len = upcall->key_len;
+ miss->upcall_type = upcall->type;
+ list_init(&miss->packets);
+
+ n_misses++;
+ } else {
+ miss = existing_miss;
+ }
+ list_push_back(&miss->packets, &upcall->packet->list_node);
+ }
+
+ /* Process each element in the to-do list, constructing the set of
+ * operations to batch. */
+ n_ops = 0;
+ HMAP_FOR_EACH (miss, hmap_node, &todo) {
+ handle_flow_miss(ofproto, miss, flow_miss_ops, &n_ops);
+ }
+ /* handle_flow_miss() appends at most two ops per miss, hence the "* 2"
+ * sizing of 'flow_miss_ops' above. */
+ assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
+
+ /* Execute batch. */
+ for (i = 0; i < n_ops; i++) {
+ dpif_ops[i] = &flow_miss_ops[i].dpif_op;
+ }
+ dpif_operate(ofproto->dpif, dpif_ops, n_ops);
+
+ /* Free memory and update facets. */
+ for (i = 0; i < n_ops; i++) {
+ struct flow_miss_op *op = &flow_miss_ops[i];
+
+ switch (op->dpif_op.type) {
+ case DPIF_OP_EXECUTE:
+ break;
+
+ case DPIF_OP_FLOW_PUT:
+ /* The datapath accepted the flow, so record which path the
+ * subfacet now takes. */
+ if (!op->dpif_op.error) {
+ op->subfacet->path = subfacet_want_path(op->subfacet->slow);
+ }
+ break;
+
+ case DPIF_OP_FLOW_DEL:
+ NOT_REACHED();
+ }
+
+ free(op->garbage);
+ }
+ hmap_destroy(&todo);
+}
+
+/* Determines how 'upcall' should be dispatched: as a flow miss, as an sFlow
+ * sample, or discarded as malformed. */
+static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL }
+classify_upcall(const struct dpif_upcall *upcall)
+{
+    union user_action_cookie cookie;
+
+    /* Flow misses are identifiable from the upcall type alone; anything that
+     * is neither a miss nor an "action" upcall is malformed. */
+    if (upcall->type == DPIF_UC_MISS) {
+        return MISS_UPCALL;
+    } else if (upcall->type != DPIF_UC_ACTION) {
+        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
+        return BAD_UPCALL;
+    }
+
+    /* "action" upcalls need a closer look: the cookie stashed in the userdata
+     * says why the datapath action sent the packet to userspace. */
+    memcpy(&cookie, &upcall->userdata, sizeof cookie);
+    if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
+        return SFLOW_UPCALL;
+    } else if (cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
+        return MISS_UPCALL;
+    } else {
+        VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
+        return BAD_UPCALL;
+    }
+}
+
+/* Extracts the flow from the sFlow-sampled 'upcall' and, if the flow key is
+ * intelligible, hands the packet to the sFlow module for sampling. */
+static void
+handle_sflow_upcall(struct ofproto_dpif *ofproto,
+                    const struct dpif_upcall *upcall)
+{
+    union user_action_cookie cookie;
+    ovs_be16 initial_tci;
+    struct flow flow;
+
+    if (ofproto_dpif_extract_flow_key(ofproto, upcall->key, upcall->key_len,
+                                      &flow, &initial_tci,
+                                      upcall->packet) == ODP_FIT_ERROR) {
+        /* The datapath gave us a flow key we cannot interpret; drop it. */
+        return;
+    }
+
+    memcpy(&cookie, &upcall->userdata, sizeof cookie);
+    dpif_sflow_received(ofproto->sflow, upcall->packet, &flow, &cookie);
+}
+
+/* Reads up to 'max_batch' upcalls from 'ofproto''s dpif, handling sFlow and
+ * invalid upcalls immediately and deferring flow misses so they can be
+ * processed together by handle_miss_upcalls().  Returns the number of upcalls
+ * read, which is less than 'max_batch' if dpif_recv() reported an error
+ * (e.g. no more upcalls were ready). */
+static int
+handle_upcalls(struct ofproto_dpif *ofproto, unsigned int max_batch)
+{
+ struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
+ struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
+ uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
+ int n_processed;
+ int n_misses;
+ int i;
+
+ assert(max_batch <= FLOW_MISS_MAX_BATCH);
+
+ n_misses = 0;
+ for (n_processed = 0; n_processed < max_batch; n_processed++) {
+ struct dpif_upcall *upcall = &misses[n_misses];
+ struct ofpbuf *buf = &miss_bufs[n_misses];
+ int error;
+
+ /* Give each upcall its own buffer, backed by on-stack stub storage. */
+ ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
+ sizeof miss_buf_stubs[n_misses]);
+ error = dpif_recv(ofproto->dpif, upcall, buf);
+ if (error) {
+ ofpbuf_uninit(buf);
+ break;
+ }
+
+ switch (classify_upcall(upcall)) {
+ case MISS_UPCALL:
+ /* Handle it later.  Keep 'buf' alive: 'upcall' references it. */
+ n_misses++;
+ break;
+
+ case SFLOW_UPCALL:
+ if (ofproto->sflow) {
+ handle_sflow_upcall(ofproto, upcall);
+ }
+ ofpbuf_uninit(buf);
+ break;
+
+ case BAD_UPCALL:
+ ofpbuf_uninit(buf);
+ break;
+ }
+ }
+
+ /* Handle deferred MISS_UPCALL processing. */
+ handle_miss_upcalls(ofproto, misses, n_misses);
+ /* The upcalls in 'misses' reference packet data inside 'miss_bufs', so the
+ * buffers may be freed only now that handle_miss_upcalls() has returned. */
+ for (i = 0; i < n_misses; i++) {
+ ofpbuf_uninit(&miss_bufs[i]);
+ }
+
+ return n_processed;
+}
+\f
+/* Flow expiration. */
+
+static int subfacet_max_idle(const struct ofproto_dpif *);
+static void update_stats(struct ofproto_dpif *);
+static void rule_expire(struct rule_dpif *);
+static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
+
+/* This function is called periodically by run(). Its job is to collect
+ * updates for the flows that have been installed into the datapath, most
+ * importantly when they last were used, and then use that information to
+ * expire flows that have not been used recently.
+ *
+ * Returns the number of milliseconds after which it should be called again. */
+static int
+expire(struct ofproto_dpif *ofproto)
+{
+ struct rule_dpif *rule, *next_rule;
+ struct oftable *table;
+ int dp_max_idle;
+
+ /* Update stats for each flow in the datapath. */
+ update_stats(ofproto);
+
+ /* Expire subfacets that have been idle too long. */
+ dp_max_idle = subfacet_max_idle(ofproto);
+ expire_subfacets(ofproto, dp_max_idle);
+
+ /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
+ OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
+ struct cls_cursor cursor;
+
+ cls_cursor_init(&cursor, &table->cls, NULL);
+ CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
+ rule_expire(rule);
+ }
+ }
+
+ /* All outstanding data in existing flows has been accounted, so it's a
+ * good time to do bond rebalancing. */
+ if (ofproto->has_bonded_bundles) {
+ struct ofbundle *bundle;
+
+ HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+ if (bundle->bond) {
+ bond_rebalance(bundle->bond, &ofproto->revalidate_set);
+ }
+ }
+ }
+
+ /* Come back within a second, or sooner if subfacets idle out faster. */
+ return MIN(dp_max_idle, 1000);
+}
+
+/* Updates flow table statistics given that the datapath just reported 'stats'
+ * as 'subfacet''s statistics.  Folds the increase since the last report into
+ * the owning facet and pushes the updated totals onward. */
+static void
+update_subfacet_stats(struct subfacet *subfacet,
+                      const struct dpif_flow_stats *stats)
+{
+    struct facet *facet = subfacet->facet;
+
+    /* Datapath counters are cumulative, so only the delta since the last
+     * report belongs to the facet.  A decrease indicates trouble. */
+    if (stats->n_packets < subfacet->dp_packet_count) {
+        VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
+    } else {
+        facet->packet_count += stats->n_packets - subfacet->dp_packet_count;
+    }
+
+    if (stats->n_bytes < subfacet->dp_byte_count) {
+        VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
+    } else {
+        facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
+    }
+
+    subfacet->dp_packet_count = stats->n_packets;
+    subfacet->dp_byte_count = stats->n_bytes;
+
+    facet->tcp_flags |= stats->tcp_flags;
+
+    subfacet_update_time(subfacet, stats->used);
+
+    /* If new bytes showed up, run the learning and accounting hooks before
+     * pushing the statistics to the facet's rule. */
+    if (facet->accounted_bytes < facet->byte_count) {
+        facet_learn(facet);
+        facet_account(facet);
+        facet->accounted_bytes = facet->byte_count;
+    }
+    facet_push_stats(facet);
+}
+
+/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
+ * about, or a flow that shouldn't be installed but was anyway.  Delete it. */
+static void
+delete_unexpected_flow(struct dpif *dpif,
+                       const struct nlattr *key, size_t key_len)
+{
+    /* Log the stray flow.  Rate-limited, since a confused datapath could
+     * produce a lot of these. */
+    if (!VLOG_DROP_WARN(&rl)) {
+        struct ds ds;
+
+        ds_init(&ds);
+        odp_flow_key_format(key, key_len, &ds);
+        VLOG_WARN("unexpected flow from datapath %s", ds_cstr(&ds));
+        ds_destroy(&ds);
+    }
+
+    COVERAGE_INC(facet_unexpected);
+    dpif_flow_del(dpif, key, key_len, NULL);
+}
+
+/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
+ *
+ * This function also pushes statistics updates to rules which each facet
+ * resubmits into. Generally these statistics will be accurate. However, if a
+ * facet changes the rule it resubmits into at some time in between
+ * update_stats() runs, it is possible that statistics accrued to the
+ * old rule will be incorrectly attributed to the new rule. This could be
+ * avoided by calling update_stats() whenever rules are created or
+ * deleted. However, the performance impact of making so many calls to the
+ * datapath do not justify the benefit of having perfectly accurate statistics.
+ */
+static void
+update_stats(struct ofproto_dpif *p)
+{
+ const struct dpif_flow_stats *stats;
+ struct dpif_flow_dump dump;
+ const struct nlattr *key;
+ size_t key_len;
+
+ dpif_flow_dump_start(&dump, p->dpif);
+ while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
+ struct subfacet *subfacet;
+
+ /* Find the userspace subfacet, if any, for this datapath flow. */
+ subfacet = subfacet_find(p, key, key_len);
+ switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
+ case SF_FAST_PATH:
+ update_subfacet_stats(subfacet, stats);
+ break;
+
+ case SF_SLOW_PATH:
+ /* Stats are updated per-packet. */
+ break;
+
+ case SF_NOT_INSTALLED:
+ default:
+ /* Either we know nothing about this flow or it should not have
+ * been installed; get rid of it. */
+ delete_unexpected_flow(p->dpif, key, key_len);
+ break;
+ }
+ }
+ dpif_flow_dump_done(&dump);
+}
+
+/* Calculates and returns the number of milliseconds of idle time after which
+ * subfacets should expire from the datapath. When a subfacet expires, we fold
+ * its statistics into its facet, and when a facet's last subfacet expires, we
+ * fold its statistic into its rule. */
+static int
+subfacet_max_idle(const struct ofproto_dpif *ofproto)
+{
+    /*
+     * Idle time histogram.
+     *
+     * Most of the time a switch has a relatively small number of subfacets.
+     * When this is the case we might as well keep statistics for all of them
+     * in userspace and to cache them in the kernel datapath for performance as
+     * well.
+     *
+     * As the number of subfacets increases, the memory required to maintain
+     * statistics about them in userspace and in the kernel becomes
+     * significant. However, with a large number of subfacets it is likely
+     * that only a few of them are "heavy hitters" that consume a large amount
+     * of bandwidth. At this point, only heavy hitters are worth caching in
+     * the kernel and maintaining in userspaces; other subfacets we can
+     * discard.
+     *
+     * The technique used to compute the idle time is to build a histogram with
+     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each subfacet
+     * that is installed in the kernel gets dropped in the appropriate bucket.
+     * After the histogram has been built, we compute the cutoff so that only
+     * the most-recently-used 1% of subfacets (but at least
+     * ofproto->up.flow_eviction_threshold flows) are kept cached. At least
+     * the most-recently-used bucket of subfacets is kept, so actually an
+     * arbitrary number of subfacets can be kept in any given expiration run
+     * (though the next run will delete most of those unless they receive
+     * additional data).
+     *
+     * This requires a second pass through the subfacets, in addition to the
+     * pass made by update_stats(), because the former function never looks at
+     * uninstallable subfacets.
+     */
+    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
+    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
+    int buckets[N_BUCKETS] = { 0 };
+    int total, subtotal, bucket;
+    struct subfacet *subfacet;
+    long long int now;
+    int i;
+
+    total = hmap_count(&ofproto->subfacets);
+    if (total <= ofproto->up.flow_eviction_threshold) {
+        /* Few enough subfacets that we can afford to keep them all. */
+        return N_BUCKETS * BUCKET_WIDTH;
+    }
+
+    /* Build histogram. */
+    now = time_msec();
+    HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
+        long long int idle = now - subfacet->used;
+        /* 'idx', not 'bucket': the latter name would shadow the outer
+         * 'bucket' variable used below (and trips -Wshadow). */
+        int idx = (idle <= 0 ? 0
+                   : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
+                   : (unsigned int) idle / BUCKET_WIDTH);
+        buckets[idx]++;
+    }
+
+    /* Find the first bucket whose flows should be expired. */
+    subtotal = bucket = 0;
+    do {
+        subtotal += buckets[bucket++];
+    } while (bucket < N_BUCKETS &&
+             subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));
+
+    if (VLOG_IS_DBG_ENABLED()) {
+        struct ds s;
+
+        ds_init(&s);
+        ds_put_cstr(&s, "keep");
+        for (i = 0; i < N_BUCKETS; i++) {
+            if (i == bucket) {
+                ds_put_cstr(&s, ", drop");
+            }
+            if (buckets[i]) {
+                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
+            }
+        }
+        VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
+        ds_destroy(&s);
+    }
+
+    return bucket * BUCKET_WIDTH;
+}
+
+enum { EXPIRE_MAX_BATCH = 50 };
+
+/* Deletes the 'n' subfacets in 'subfacets' from the datapath in one batched
+ * transaction, folds their final statistics back in, and destroys them. */
+static void
+expire_batch(struct ofproto_dpif *ofproto, struct subfacet **subfacets, int n)
+{
+    struct dpif_flow_stats stats[EXPIRE_MAX_BATCH];
+    struct odputil_keybuf keybufs[EXPIRE_MAX_BATCH];
+    struct ofpbuf keys[EXPIRE_MAX_BATCH];
+    struct dpif_op ops[EXPIRE_MAX_BATCH];
+    struct dpif_op *opsp[EXPIRE_MAX_BATCH];
+    int i;
+
+    /* Build one DPIF_OP_FLOW_DEL per subfacet. */
+    for (i = 0; i < n; i++) {
+        struct dpif_op *op = &ops[i];
+
+        subfacet_get_key(subfacets[i], &keybufs[i], &keys[i]);
+        op->type = DPIF_OP_FLOW_DEL;
+        op->u.flow_del.key = keys[i].data;
+        op->u.flow_del.key_len = keys[i].size;
+        op->u.flow_del.stats = &stats[i];
+        opsp[i] = op;
+    }
+
+    dpif_operate(ofproto->dpif, opsp, n);
+
+    /* The flows are now gone from the datapath: record their final stats and
+     * free the userspace state. */
+    for (i = 0; i < n; i++) {
+        subfacet_reset_dp_stats(subfacets[i], &stats[i]);
+        subfacets[i]->path = SF_NOT_INSTALLED;
+        subfacet_destroy(subfacets[i]);
+    }
+}
+
+/* Expires every subfacet that has been idle longer than 'dp_max_idle' msecs
+ * (or longer than 10 s for flows belonging to special protocols), removing
+ * installed ones from the datapath in batches of EXPIRE_MAX_BATCH. */
+static void
+expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
+{
+    /* Cutoff time for most flows. */
+    long long int normal_cutoff = time_msec() - dp_max_idle;
+
+    /* We really want to keep flows for special protocols around, so use a
+     * more conservative cutoff. */
+    long long int special_cutoff = time_msec() - 10000;
+
+    struct subfacet *subfacet, *next;
+    struct subfacet *batch[EXPIRE_MAX_BATCH];
+    int n = 0;
+
+    HMAP_FOR_EACH_SAFE (subfacet, next, hmap_node, &ofproto->subfacets) {
+        long long int cutoff
+            = (subfacet->slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)
+               ? special_cutoff
+               : normal_cutoff);
+
+        if (subfacet->used >= cutoff) {
+            continue;
+        }
+
+        if (subfacet->path == SF_NOT_INSTALLED) {
+            /* Nothing in the datapath to remove; just free the state. */
+            subfacet_destroy(subfacet);
+        } else {
+            batch[n++] = subfacet;
+            if (n >= EXPIRE_MAX_BATCH) {
+                expire_batch(ofproto, batch, n);
+                n = 0;
+            }
+        }
+    }
+
+    /* Flush the final partial batch. */
+    if (n > 0) {
+        expire_batch(ofproto, batch, n);
+    }
+}
+
+/* If 'rule' is an OpenFlow rule, that has expired according to OpenFlow rules,
+ * then delete it entirely. */
+static void
+rule_expire(struct rule_dpif *rule)
+{
+    struct facet *facet, *next_facet;
+    long long int now;
+    uint8_t reason;
+
+    /* A rule with an operation still pending cannot be expired yet. */
+    if (rule->up.pending) {
+        return;
+    }
+
+    /* Has 'rule' expired?  A timeout of zero means "never". */
+    now = time_msec();
+    if (rule->up.hard_timeout
+        && now > rule->up.modified + rule->up.hard_timeout * 1000) {
+        reason = OFPRR_HARD_TIMEOUT;
+    } else if (rule->up.idle_timeout
+               && now > rule->up.used + rule->up.idle_timeout * 1000) {
+        reason = OFPRR_IDLE_TIMEOUT;
+    } else {
+        return;
+    }
+
+    COVERAGE_INC(ofproto_dpif_expired);
+
+    /* Update stats.  (This is a no-op if the rule expired due to an idle
+     * timeout, because that only happens when the rule has no facets left.) */
+    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
+        facet_remove(facet);
+    }
+
+    /* Get rid of the rule. */
+    ofproto_rule_expire(&rule->up, reason);
+}
+\f
+/* Facets. */
+
+/* Creates and returns a new facet owned by 'rule', given a 'flow'.
+ *
+ * The caller must already have determined that no facet with an identical
+ * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
+ * the ofproto's classifier table.
+ *
+ * 'hash' must be the return value of flow_hash(flow, 0).
+ *
+ * The facet will initially have no subfacets. The caller should create (at
+ * least) one subfacet with subfacet_create(). */
+static struct facet *
+facet_create(struct rule_dpif *rule, const struct flow *flow, uint32_t hash)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
+    struct facet *facet = xzalloc(sizeof *facet);
+
+    facet->used = time_msec();
+    facet->rule = rule;
+    facet->flow = *flow;
+    list_init(&facet->subfacets);
+
+    /* Link the facet into the ofproto and its rule. */
+    hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
+    list_push_back(&rule->facets, &facet->list_node);
+
+    netflow_flow_init(&facet->nf_flow);
+    netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
+
+    return facet;
+}
+
+/* Frees the memory for 'facet' itself.  The caller must already have removed
+ * 'facet' from every structure that references it; see facet_remove(). */
+static void
+facet_free(struct facet *facet)
+{
+ free(facet);
+}
+
+/* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
+ * 'odp_actions' on 'packet', whose headers 'flow' describes.
+ *
+ * Takes ownership of 'packet'.  Returns true on success, false if the
+ * datapath reported an error. */
+static bool
+execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
+                    const struct nlattr *odp_actions, size_t actions_len,
+                    struct ofpbuf *packet)
+{
+    struct odputil_keybuf keybuf;
+    struct ofpbuf key;
+    int error;
+
+    /* Serialize 'flow' into an ODP flow key on the stack. */
+    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+    odp_flow_key_from_flow(&key, flow);
+
+    error = dpif_execute(ofproto->dpif, key.data, key.size,
+                         odp_actions, actions_len, packet);
+
+    ofpbuf_delete(packet);
+    return error == 0;
+}
+
+/* Remove 'facet' from 'ofproto' and free up the associated memory:
+ *
+ * - If 'facet' was installed in the datapath, uninstalls it and updates its
+ * rule's statistics, via subfacet_uninstall().
+ *
+ * - Removes 'facet' from its rule and from ofproto->facets.
+ */
+static void
+facet_remove(struct facet *facet)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct subfacet *subfacet, *next_subfacet;
+
+ /* facet_create()'s contract requires callers to give every facet at least
+ * one subfacet. */
+ assert(!list_is_empty(&facet->subfacets));
+
+ /* First uninstall all of the subfacets to get final statistics. */
+ LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
+ subfacet_uninstall(subfacet);
+ }
+
+ /* Flush the final stats to the rule.
+ *
+ * This might require us to have at least one subfacet around so that we
+ * can use its actions for accounting in facet_account(), which is why we
+ * have uninstalled but not yet destroyed the subfacets. */
+ facet_flush_stats(facet);
+
+ /* Now we're really all done so destroy everything. */
+ LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
+ &facet->subfacets) {
+ subfacet_destroy__(subfacet);
+ }
+ hmap_remove(&ofproto->facets, &facet->hmap_node);
+ list_remove(&facet->list_node);
+ facet_free(facet);
+}
+
+/* Feed information from 'facet' back into the learning table to keep it in
+ * sync with what is actually flowing through the datapath. */
+static void
+facet_learn(struct facet *facet)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+    struct action_xlate_ctx ctx;
+    bool worthwhile;
+
+    /* Re-translating the actions only matters when the facet has learn
+     * actions, NORMAL actions, or a fin_timeout with a FIN/RST seen. */
+    worthwhile = facet->has_learn
+        || facet->has_normal
+        || (facet->has_fin_timeout
+            && (facet->tcp_flags & (TCP_FIN | TCP_RST)) != 0;
+    if (!worthwhile) {
+        return;
+    }
+
+    action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
+                          facet->flow.vlan_tci,
+                          facet->rule, facet->tcp_flags, NULL);
+    ctx.may_learn = true;
+    xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
+                                   facet->rule->up.ofpacts_len);
+}
+
+/* Feeds this facet's byte count accrued since the last call to the bond
+ * module, so bond rebalancing can use it as a basis.  No-op unless the facet
+ * uses NORMAL actions and the bridge has bonded bundles. */
+static void
+facet_account(struct facet *facet)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct subfacet *subfacet;
+ const struct nlattr *a;
+ unsigned int left;
+ ovs_be16 vlan_tci;
+ uint64_t n_bytes;
+
+ if (!facet->has_normal || !ofproto->has_bonded_bundles) {
+ return;
+ }
+ n_bytes = facet->byte_count - facet->accounted_bytes;
+
+ /* This loop feeds byte counters to bond_account() for rebalancing to use
+ * as a basis. We also need to track the actual VLAN on which the packet
+ * is going to be sent to ensure that it matches the one passed to
+ * bond_choose_output_slave(). (Otherwise, we will account to the wrong
+ * hash bucket.)
+ *
+ * We use the actions from an arbitrary subfacet because they should all
+ * be equally valid for our purpose. */
+ subfacet = CONTAINER_OF(list_front(&facet->subfacets),
+ struct subfacet, list_node);
+ vlan_tci = facet->flow.vlan_tci;
+ NL_ATTR_FOR_EACH_UNSAFE (a, left,
+ subfacet->actions, subfacet->actions_len) {
+ const struct ovs_action_push_vlan *vlan;
+ struct ofport_dpif *port;
+
+ switch (nl_attr_type(a)) {
+ case OVS_ACTION_ATTR_OUTPUT:
+ port = get_odp_port(ofproto, nl_attr_get_u32(a));
+ if (port && port->bundle && port->bundle->bond) {
+ bond_account(port->bundle->bond, &facet->flow,
+ vlan_tci_to_vid(vlan_tci), n_bytes);
+ }
+ break;
+
+ /* VLAN actions change the tag that subsequent outputs will carry. */
+ case OVS_ACTION_ATTR_POP_VLAN:
+ vlan_tci = htons(0);
+ break;
+
+ case OVS_ACTION_ATTR_PUSH_VLAN:
+ vlan = nl_attr_get(a);
+ vlan_tci = vlan->vlan_tci;
+ break;
+ }
+ }
+}
+
+/* Returns true if the only action for 'facet' is to send to the controller.
+ * (We don't report NetFlow expiration messages for such facets because they
+ * are just part of the control logic for the network, not real traffic). */
+static bool
+facet_is_controller_flow(struct facet *facet)
+{
+    const struct rule *rule;
+    const struct ofpact *ofpacts;
+
+    if (!facet) {
+        return false;
+    }
+
+    /* "Only action is controller": the first ofpact is OFPACT_CONTROLLER and
+     * no further ofpact follows it. */
+    rule = &facet->rule->up;
+    ofpacts = rule->ofpacts;
+    return (ofpacts->type == OFPACT_CONTROLLER
+            && ofpact_next(ofpacts) >= ofpact_end(ofpacts, rule->ofpacts_len));
+}
+
+/* Folds all of 'facet''s statistics into its rule. Also updates the
+ * accounting ofhook and emits a NetFlow expiration if appropriate. All of
+ * 'facet''s statistics in the datapath should have been zeroed and folded into
+ * its packet and byte counts before this function is called. */