+handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
+{
+    struct odp_msg *msg = packet->data;
+
+    /* Dispatch one message received from the datapath.  Ownership of
+     * 'packet' ends here: each case either passes it to a helper (which
+     * presumably takes ownership -- confirm against send_packet_in() and
+     * handle_odp_miss_msg()) or frees it in this function. */
+    switch (msg->type) {
+    case _ODPL_ACTION_NR:
+        /* Datapath executed a "send to userspace" action. */
+        COVERAGE_INC(ofproto_ctlr_action);
+        send_packet_in(p, packet);
+        break;
+
+    case _ODPL_SFLOW_NR:
+        /* sFlow sample; only meaningful when sFlow is configured. */
+        if (p->sflow) {
+            ofproto_sflow_received(p->sflow, msg);
+        }
+        ofpbuf_delete(packet);
+        break;
+
+    case _ODPL_MISS_NR:
+        /* No matching kernel flow: set one up or punt to the controller. */
+        handle_odp_miss_msg(p, packet);
+        break;
+
+    default:
+        VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
+                     msg->type);
+        /* Fix: 'packet' was previously leaked for unrecognized message
+         * types, since no handler took ownership of it. */
+        ofpbuf_delete(packet);
+        break;
+    }
+}
+\f
+/* Flow expiration. */
+
+/* Context passed by ofproto_expire() to rule_expire() through
+ * classifier_for_each(). */
+struct expire_cbdata {
+    struct ofproto *ofproto;    /* The ofproto whose rules are being swept. */
+    int dp_max_idle;            /* Datapath idle cutoff, in msecs, computed
+                                 * by ofproto_dp_max_idle(). */
+};
+
+static int ofproto_dp_max_idle(const struct ofproto *);
+static void ofproto_update_used(struct ofproto *);
+static void rule_expire(struct cls_rule *, void *cbdata);
+
+/* This function is called periodically by ofproto_run().  Its job is to
+ * collect updates for the flows that have been installed into the datapath,
+ * most importantly when they last were used, and then use that information to
+ * expire flows that have not been used recently.
+ *
+ * Returns the number of milliseconds after which it should be called again. */
+static int
+ofproto_expire(struct ofproto *ofproto)
+{
+    struct expire_cbdata cbdata;
+
+    /* Update 'used' for each flow in the datapath. */
+    ofproto_update_used(ofproto);
+
+    /* Expire idle flows.
+     *
+     * A wildcarded flow is idle only when all of its subrules have expired due
+     * to becoming idle, so iterate through the exact-match flows first. */
+    cbdata.ofproto = ofproto;
+    cbdata.dp_max_idle = ofproto_dp_max_idle(ofproto);
+    classifier_for_each(&ofproto->cls, CLS_INC_EXACT, rule_expire, &cbdata);
+    classifier_for_each(&ofproto->cls, CLS_INC_WILD, rule_expire, &cbdata);
+
+    /* Let the hook know that we're at a stable point: all outstanding data
+     * in existing flows has been accounted to the account_cb.  Thus, the
+     * hook can now reasonably do operations that depend on having accurate
+     * flow volume accounting (currently, that's just bond rebalancing). */
+    if (ofproto->ofhooks->account_checkpoint_cb) {
+        ofproto->ofhooks->account_checkpoint_cb(ofproto->aux);
+    }
+
+    /* Run again within a second at most, or sooner when the datapath idle
+     * cutoff is shorter than that. */
+    return MIN(cbdata.dp_max_idle, 1000);
+}
+
+/* Update 'used' member of each flow currently installed into the datapath. */
+static void
+ofproto_update_used(struct ofproto *p)
+{
+    struct odp_flow *dump;
+    size_t n_dump;
+    size_t idx;
+
+    /* Snapshot every flow the datapath currently holds; bail out quietly if
+     * the dump fails. */
+    if (dpif_flow_list_all(p->dpif, &dump, &n_dump)) {
+        return;
+    }
+
+    for (idx = 0; idx < n_dump; idx++) {
+        struct odp_flow *df = &dump[idx];
+        struct rule *rule = rule_from_cls_rule(
+            classifier_find_rule_exactly(&p->cls, &df->key, 0, UINT16_MAX));
+
+        if (!rule || !rule->installed) {
+            /* There's a flow in the datapath that we know nothing about.
+             * Delete it. */
+            COVERAGE_INC(ofproto_unexpected_rule);
+            dpif_flow_del(p->dpif, df);
+            continue;
+        }
+
+        /* Fold the datapath's latest usage and byte counts into the rule. */
+        update_time(p, rule, &df->stats);
+        rule_account(p, rule, df->stats.n_bytes);
+    }
+    free(dump);
+}
+
+/* Calculates and returns the number of milliseconds of idle time after which
+ * flows should expire from the datapath and we should fold their statistics
+ * into their parent rules in userspace. */
+static int
+ofproto_dp_max_idle(const struct ofproto *ofproto)
+{
+    /*
+     * Idle time histogram.
+     *
+     * Most of the time a switch has a relatively small number of flows.  When
+     * this is the case we might as well keep statistics for all of them in
+     * userspace and to cache them in the kernel datapath for performance as
+     * well.
+     *
+     * As the number of flows increases, the memory required to maintain
+     * statistics about them in userspace and in the kernel becomes
+     * significant.  However, with a large number of flows it is likely that
+     * only a few of them are "heavy hitters" that consume a large amount of
+     * bandwidth.  At this point, only heavy hitters are worth caching in the
+     * kernel and maintaining in userspace; other flows we can discard.
+     *
+     * The technique used to compute the idle time is to build a histogram with
+     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each flow
+     * that is installed in the kernel gets dropped in the appropriate bucket.
+     * After the histogram has been built, we compute the cutoff so that only
+     * the most-recently-used 1% of flows (but at least 1000 flows) are kept
+     * cached.  At least the most-recently-used bucket of flows is kept, so
+     * actually an arbitrary number of flows can be kept in any given
+     * expiration run (though the next run will delete most of those unless
+     * they receive additional data).
+     *
+     * This requires a second pass through the exact-match flows, in addition
+     * to the pass made by ofproto_update_used(), because the former function
+     * never looks at uninstallable flows.
+     */
+    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
+    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
+    int buckets[N_BUCKETS] = { 0 };
+    int total, bucket;
+    struct rule *rule;
+    long long int now;
+    int i;
+
+    /* With few flows, keep everything: report the maximum idle time. */
+    total = classifier_count_exact(&ofproto->cls);
+    if (total <= 1000) {
+        return N_BUCKETS * BUCKET_WIDTH;
+    }
+
+    /* Build histogram. */
+    now = time_msec();
+    CLASSIFIER_FOR_EACH_EXACT_RULE (rule, cr, &ofproto->cls) {
+        long long int idle = now - rule->used;
+        /* 'idx' (renamed from 'bucket', which shadowed the outer 'bucket')
+         * clamps the idle time into [0, N_BUCKETS - 1]. */
+        int idx = (idle <= 0 ? 0
+                   : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
+                   : (unsigned int) idle / BUCKET_WIDTH);
+        buckets[idx]++;
+    }
+
+    /* Find the first bucket whose flows should be expired. */
+    for (bucket = 0; bucket < N_BUCKETS; bucket++) {
+        if (buckets[bucket]) {
+            int subtotal = 0;
+            do {
+                subtotal += buckets[bucket++];
+            } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100));
+            break;
+        }
+    }
+
+    if (VLOG_IS_DBG_ENABLED()) {
+        struct ds s;
+
+        ds_init(&s);
+        ds_put_cstr(&s, "keep");
+        for (i = 0; i < N_BUCKETS; i++) {
+            if (i == bucket) {
+                ds_put_cstr(&s, ", drop");
+            }
+            if (buckets[i]) {
+                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
+            }
+        }
+        /* Log at DBG level to match the VLOG_IS_DBG_ENABLED() guard above
+         * (previously VLOG_INFO, inconsistent with the guard). */
+        VLOG_DBG("%s: %s (msec:count)",
+                 dpif_name(ofproto->dpif), ds_cstr(&s));
+        ds_destroy(&s);
+    }
+
+    return bucket * BUCKET_WIDTH;
+}
+
+/* Sends a NetFlow active-timeout expiration record for 'rule', carrying its
+ * cumulative statistics, if NetFlow is enabled, 'rule' is not a controller
+ * rule, and its active timeout has expired.  Does not remove 'rule'. */
+static void
+rule_active_timeout(struct ofproto *ofproto, struct rule *rule)
+{
+    if (ofproto->netflow && !is_controller_rule(rule) &&
+        netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
+        struct ofexpired expired;
+        struct odp_flow odp_flow;
+
+        /* Get updated flow stats.
+         *
+         * XXX We could avoid this call entirely if (1) ofproto_update_used()
+         * updated TCP flags and (2) the dpif_flow_list_all() in
+         * ofproto_update_used() zeroed TCP flags. */
+        /* Zeroing 'odp_flow' means its stats contribute nothing to the
+         * totals below when the rule is not installed in the datapath. */
+        memset(&odp_flow, 0, sizeof odp_flow);
+        if (rule->installed) {
+            odp_flow.key = rule->cr.flow;
+            odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
+            /* NOTE(review): the return value of dpif_flow_get() is ignored;
+             * presumably 'odp_flow.stats' stays zeroed on failure -- confirm
+             * against the dpif implementation. */
+            dpif_flow_get(ofproto->dpif, &odp_flow);
+
+            if (odp_flow.stats.n_packets) {
+                update_time(ofproto, rule, &odp_flow.stats);
+                netflow_flow_update_flags(&rule->nf_flow,
+                                          odp_flow.stats.tcp_flags);
+            }
+        }
+
+        /* Report cumulative totals: the rule's accounted counters plus
+         * whatever the datapath has accumulated since. */
+        expired.flow = rule->cr.flow;
+        expired.packet_count = rule->packet_count +
+            odp_flow.stats.n_packets;
+        expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
+        expired.used = rule->used;
+
+        netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
+    }
+}
+
+/* If 'cls_rule' is an OpenFlow rule, that has expired according to OpenFlow
+ * rules, then delete it entirely.
+ *
+ * If 'cls_rule' is a subrule, that has not been used recently, remove it from
+ * the datapath and fold its statistics back into its super-rule.
+ *
+ * (This is a callback function for classifier_for_each().  'cbdata_' is a
+ * struct expire_cbdata supplied by ofproto_expire().) */
+static void
+rule_expire(struct cls_rule *cls_rule, void *cbdata_)
+{
+    struct expire_cbdata *cbdata = cbdata_;
+    struct ofproto *ofproto = cbdata->ofproto;
+    struct rule *rule = rule_from_cls_rule(cls_rule);
+    long long int hard_expire, idle_expire, expire, now;
+
+    /* Calculate OpenFlow expiration times for 'rule'.  The timeouts are in
+     * seconds while 'created'/'used' are msec timestamps, hence the * 1000.
+     * A timeout of 0 means "never expires" (LLONG_MAX). */
+    hard_expire = (rule->hard_timeout
+                   ? rule->created + rule->hard_timeout * 1000
+                   : LLONG_MAX);
+    /* The idle timeout only applies to a subrule or to a rule whose 'list'
+     * is empty (presumably 'list' holds the rule's subrules -- confirm). */
+    idle_expire = (rule->idle_timeout
+                   && (rule->super || list_is_empty(&rule->list))
+                   ? rule->used + rule->idle_timeout * 1000
+                   : LLONG_MAX);
+    expire = MIN(hard_expire, idle_expire);
+
+    now = time_msec();
+    if (now < expire) {
+        /* 'rule' has not expired according to OpenFlow rules. */
+        if (!rule->cr.wc.wildcards) {
+            /* Exact-match rule: it may still be evicted from the datapath
+             * if it has been idle past the datapath cutoff. */
+            if (now >= rule->used + cbdata->dp_max_idle) {
+                /* This rule is idle, so drop it to free up resources. */
+                if (rule->super) {
+                    /* It's not part of the OpenFlow flow table, so we can
+                     * delete it entirely and fold its statistics into its
+                     * super-rule. */
+                    rule_remove(ofproto, rule);
+                } else {
+                    /* It is part of the OpenFlow flow table, so we have to
+                     * keep the rule but we can at least uninstall it from the
+                     * datapath. */
+                    rule_uninstall(ofproto, rule);
+                }
+            } else {
+                /* Send NetFlow active timeout if appropriate. */
+                rule_active_timeout(cbdata->ofproto, rule);
+            }
+        }
+    } else {
+        /* 'rule' has expired according to OpenFlow rules. */
+        COVERAGE_INC(ofproto_expired);
+
+        /* Update stats.  (This is a no-op if the rule expired due to an idle
+         * timeout, because that only happens when the rule has no subrules
+         * left.) */
+        if (rule->cr.wc.wildcards) {
+            /* Removing each subrule folds its statistics into 'rule'. */
+            struct rule *subrule, *next;
+            LIST_FOR_EACH_SAFE (subrule, next, list, &rule->list) {
+                rule_remove(cbdata->ofproto, subrule);
+            }
+        } else {
+            rule_uninstall(cbdata->ofproto, rule);
+        }
+
+        /* Get rid of the rule. */
+        if (!rule_is_hidden(rule)) {
+            /* Tell the controller why the flow went away before deleting. */
+            send_flow_removed(cbdata->ofproto, rule, now,
+                              (now >= hard_expire
+                               ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
+        }
+        rule_remove(cbdata->ofproto, rule);
+    }
+}
+\f
+static void
+revalidate_cb(struct cls_rule *sub_, void *cbdata_)