X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=f44d8a2c00291c2f68e14e82070ec64e3057e1b7;hb=898bf89d807e140b7cf1966d2efc9c6410f71c9d;hp=859f416fe1312ea85baf5bef9103e64ab3d986ad;hpb=8497dd41214ddaac26928f2efa90becd1b336a52;p=openvswitch diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c index 859f416f..f44d8a2c 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -81,7 +81,7 @@ static void ofport_free(struct ofport *); static void hton_ofp_phy_port(struct ofp_phy_port *); static int xlate_actions(const union ofp_action *in, size_t n_in, - const flow_t *flow, struct ofproto *ofproto, + const struct flow *, struct ofproto *, const struct ofpbuf *packet, struct odp_actions *out, tag_type *tags, bool *may_set_up_flow, uint16_t *nf_output_iface); @@ -324,7 +324,7 @@ static const struct ofhooks default_ofhooks; static uint64_t pick_datapath_id(const struct ofproto *); static uint64_t pick_fallback_dpid(void); -static void ofproto_expire(struct ofproto *); +static int ofproto_expire(struct ofproto *); static void update_stats(struct ofproto *, struct rule *, const struct odp_flow_stats *); @@ -336,8 +336,6 @@ static void handle_odp_msg(struct ofproto *, struct ofpbuf *); static void handle_openflow(struct ofconn *, struct ofproto *, struct ofpbuf *); -static void refresh_port_groups(struct ofproto *); - static struct ofport *get_port(const struct ofproto *, uint16_t odp_port); static void update_port(struct ofproto *, const char *devname); static int init_ports(struct ofproto *); @@ -886,7 +884,6 @@ ofproto_set_sflow(struct ofproto *ofproto, struct ofport *ofport; os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif); - refresh_port_groups(ofproto); HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) { ofproto_sflow_add_port(os, ofport->odp_port, netdev_get_name(ofport->netdev)); @@ -1147,9 +1144,9 @@ ofproto_run1(struct ofproto *p) } if (time_msec() >= p->next_expiration) { + int delay = ofproto_expire(p); + p->next_expiration = time_msec() + delay; COVERAGE_INC(ofproto_expiration); - ofproto_expire(p); - p->next_expiration = time_msec() + 1000; } if (p->netflow) { @@ -1247,8 +1244,50 @@ ofproto_is_alive(const struct ofproto *p) return !hmap_is_empty(&p->controllers); } +/* Deletes port number 'odp_port' from the datapath for 'ofproto'. + * + * This is almost the same as calling dpif_port_del() directly on the + * datapath, but it also makes 'ofproto' close its open netdev for the port + * (if any). This makes it possible to create a new netdev of a different + * type under the same name, which otherwise the netdev library would refuse + * to do because of the conflict. (The netdev would eventually get closed on + * the next trip through ofproto_run(), but this interface is more direct.) + * + * Returns 0 if successful, otherwise a positive errno. */ int -ofproto_send_packet(struct ofproto *p, const flow_t *flow, +ofproto_port_del(struct ofproto *ofproto, uint16_t odp_port) +{ + struct ofport *ofport = get_port(ofproto, odp_port); + const char *name = ofport ? (char *) ofport->opp.name : ""; + int error; + + error = dpif_port_del(ofproto->dpif, odp_port); + if (error) { + VLOG_ERR("%s: failed to remove port %"PRIu16" (%s) interface (%s)", + dpif_name(ofproto->dpif), odp_port, name, strerror(error)); + } else if (ofport) { + /* 'name' is ofport->opp.name and update_port() is going to destroy + * 'ofport'. Just in case update_port() refers to 'name' after it + * destroys 'ofport', make a copy of it around the update_port() + * call. 
*/ + char *devname = xstrdup(name); + update_port(ofproto, devname); + free(devname); + } + return error; +} + +/* Checks if 'ofproto' thinks 'odp_port' should be included in floods. Returns + * true if 'odp_port' exists and should be included, false otherwise. */ +bool +ofproto_port_is_floodable(struct ofproto *ofproto, uint16_t odp_port) +{ + struct ofport *ofport = get_port(ofproto, odp_port); + return ofport && !(ofport->opp.config & OFPPC_NO_FLOOD); +} + +int +ofproto_send_packet(struct ofproto *p, const struct flow *flow, const union ofp_action *actions, size_t n_actions, const struct ofpbuf *packet) { @@ -1263,14 +1302,13 @@ ofproto_send_packet(struct ofproto *p, const flow_t *flow, /* XXX Should we translate the dpif_execute() errno value into an OpenFlow * error code? */ - dpif_execute(p->dpif, flow->in_port, odp_actions.actions, - odp_actions.n_actions, packet); + dpif_execute(p->dpif, odp_actions.actions, odp_actions.n_actions, packet); return 0; } void -ofproto_add_flow(struct ofproto *p, - const flow_t *flow, uint32_t wildcards, unsigned int priority, +ofproto_add_flow(struct ofproto *p, const struct flow *flow, + uint32_t wildcards, unsigned int priority, const union ofp_action *actions, size_t n_actions, int idle_timeout) { @@ -1283,7 +1321,7 @@ ofproto_add_flow(struct ofproto *p, } void -ofproto_delete_flow(struct ofproto *ofproto, const flow_t *flow, +ofproto_delete_flow(struct ofproto *ofproto, const struct flow *flow, uint32_t wildcards, unsigned int priority) { struct rule *rule; @@ -1334,6 +1372,8 @@ reinit_ports(struct ofproto *p) size_t n_odp_ports; size_t i; + COVERAGE_INC(ofproto_reinit_ports); + svec_init(&devnames); HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { svec_add (&devnames, (char *) ofport->opp.name); @@ -1351,38 +1391,6 @@ reinit_ports(struct ofproto *p) svec_destroy(&devnames); } -static size_t -refresh_port_group(struct ofproto *p, unsigned int group) -{ - uint16_t *ports; - size_t n_ports; - struct ofport *port; - - assert(group == DP_GROUP_ALL || group == DP_GROUP_FLOOD); - - ports = xmalloc(hmap_count(&p->ports) * sizeof *ports); - n_ports = 0; - HMAP_FOR_EACH (port, hmap_node, &p->ports) { - if (group == DP_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) { - ports[n_ports++] = port->odp_port; - } - } - dpif_port_group_set(p->dpif, group, ports, n_ports); - free(ports); - - return n_ports; -} - -static void -refresh_port_groups(struct ofproto *p) -{ - size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD); - size_t n_all = refresh_port_group(p, DP_GROUP_ALL); - if (p->sflow) { - ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all); - } -} - static struct ofport * make_ofport(const struct odp_port *odp_port) { @@ -1481,9 +1489,6 @@ send_port_status(struct ofproto *p, const struct ofport *ofport, hton_ofp_phy_port(&ops->desc); queue_tx(b, ofconn, NULL); } - if (p->ofhooks->port_changed_cb) { - p->ofhooks->port_changed_cb(reason, &ofport->opp, p->aux); - } } static void @@ -1601,9 +1606,6 @@ update_port(struct ofproto *p, const char *devname) : !new_ofport ? OFPPR_DELETE : OFPPR_MODIFY)); ofport_free(old_ofport); - - /* Update port groups. 
*/ - refresh_port_groups(p); } static int @@ -1629,7 +1631,6 @@ init_ports(struct ofproto *p) } } free(ports); - refresh_port_groups(p); return 0; } @@ -1952,8 +1953,7 @@ execute_odp_actions(struct ofproto *ofproto, uint16_t in_port, } else { int error; - error = dpif_execute(ofproto->dpif, in_port, - actions, n_actions, packet); + error = dpif_execute(ofproto->dpif, actions, n_actions, packet); ofpbuf_delete(packet); return !error; } @@ -1978,7 +1978,7 @@ execute_odp_actions(struct ofproto *ofproto, uint16_t in_port, * Takes ownership of 'packet'. */ static void rule_execute(struct ofproto *ofproto, struct rule *rule, - struct ofpbuf *packet, const flow_t *flow) + struct ofpbuf *packet, const struct flow *flow) { const union odp_action *actions; struct odp_flow_stats stats; @@ -2037,7 +2037,7 @@ rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet, /* Send the packet and credit it to the rule. */ if (packet) { - flow_t flow; + struct flow flow; flow_extract(packet, 0, in_port, &flow); rule_execute(p, rule, packet, &flow); } @@ -2059,7 +2059,7 @@ rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet, static struct rule * rule_create_subrule(struct ofproto *ofproto, struct rule *rule, - const flow_t *flow) + const struct flow *flow) { struct rule *subrule = rule_create(ofproto, rule, NULL, 0, rule->idle_timeout, rule->hard_timeout, @@ -2131,7 +2131,7 @@ do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags, struct odp_flow_put *put) { memset(&put->flow.stats, 0, sizeof put->flow.stats); - put->flow.key = rule->cr.flow; + odp_flow_key_from_flow(&put->flow.key, &rule->cr.flow); put->flow.actions = rule->odp_actions; put->flow.n_actions = rule->n_odp_actions; put->flow.flags = 0; @@ -2235,7 +2235,7 @@ rule_uninstall(struct ofproto *p, struct rule *rule) if (rule->installed) { struct odp_flow odp_flow; - odp_flow.key = rule->cr.flow; + odp_flow_key_from_flow(&odp_flow.key, &rule->cr.flow); odp_flow.actions = NULL; odp_flow.n_actions = 0; odp_flow.flags = 0; @@ -2441,17 +2441,6 @@ handle_set_config(struct ofproto *p, struct ofconn *ofconn, return 0; } -static void -add_output_group_action(struct odp_actions *actions, uint16_t group, - uint16_t *nf_output_iface) -{ - odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group; - - if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) { - *nf_output_iface = NF_OUT_FLOOD; - } -} - static void add_controller_action(struct odp_actions *actions, uint16_t max_len) { @@ -2461,7 +2450,7 @@ add_controller_action(struct odp_actions *actions, uint16_t max_len) struct action_xlate_ctx { /* Input. */ - flow_t flow; /* Flow to which these actions correspond. */ + struct flow flow; /* Flow to which these actions correspond. */ int recurse; /* Recursion level, via xlate_table_action. 
*/ struct ofproto *ofproto; const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a @@ -2506,7 +2495,7 @@ add_output_action(struct action_xlate_ctx *ctx, uint16_t port) } static struct rule * -lookup_valid_rule(struct ofproto *ofproto, const flow_t *flow) +lookup_valid_rule(struct ofproto *ofproto, const struct flow *flow) { struct rule *rule; rule = rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow)); @@ -2556,6 +2545,21 @@ xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port) } } +static void +flood_packets(struct ofproto *ofproto, uint16_t odp_in_port, uint32_t mask, + uint16_t *nf_output_iface, struct odp_actions *actions) +{ + struct ofport *ofport; + + HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) { + uint16_t odp_port = ofport->odp_port; + if (odp_port != odp_in_port && !(ofport->opp.config & mask)) { + odp_actions_add(actions, ODPAT_OUTPUT)->output.port = odp_port; + } + } + *nf_output_iface = NF_OUT_FLOOD; +} + static void xlate_output_action__(struct action_xlate_ctx *ctx, uint16_t port, uint16_t max_len) @@ -2582,11 +2586,12 @@ xlate_output_action__(struct action_xlate_ctx *ctx, } break; case OFPP_FLOOD: - add_output_group_action(ctx->out, DP_GROUP_FLOOD, - &ctx->nf_output_iface); + flood_packets(ctx->ofproto, ctx->flow.in_port, OFPPC_NO_FLOOD, + &ctx->nf_output_iface, ctx->out); break; case OFPP_ALL: - add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface); + flood_packets(ctx->ofproto, ctx->flow.in_port, 0, + &ctx->nf_output_iface, ctx->out); break; case OFPP_CONTROLLER: add_controller_action(ctx->out, max_len); @@ -2764,13 +2769,17 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, break; case OFPAT_SET_VLAN_VID: - oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_VID); - ctx->flow.dl_vlan = oa->vlan_vid.vlan_vid = ia->vlan_vid.vlan_vid; + oa = odp_actions_add(ctx->out, ODPAT_SET_DL_TCI); + oa->dl_tci.tci = ia->vlan_vid.vlan_vid; + oa->dl_tci.tci |= htons(ctx->flow.dl_vlan_pcp << VLAN_PCP_SHIFT); + ctx->flow.dl_vlan = ia->vlan_vid.vlan_vid; break; case OFPAT_SET_VLAN_PCP: - oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_PCP); - ctx->flow.dl_vlan_pcp = oa->vlan_pcp.vlan_pcp = ia->vlan_pcp.vlan_pcp; + oa = odp_actions_add(ctx->out, ODPAT_SET_DL_TCI); + oa->dl_tci.tci = htons(ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT); + oa->dl_tci.tci |= ctx->flow.dl_vlan; + ctx->flow.dl_vlan_pcp = ia->vlan_pcp.vlan_pcp; break; case OFPAT_STRIP_VLAN: @@ -2837,7 +2846,7 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, static int xlate_actions(const union ofp_action *in, size_t n_in, - const flow_t *flow, struct ofproto *ofproto, + const struct flow *flow, struct ofproto *ofproto, const struct ofpbuf *packet, struct odp_actions *out, tag_type *tags, bool *may_set_up_flow, uint16_t *nf_output_iface) @@ -2907,9 +2916,9 @@ handle_packet_out(struct ofproto *p, struct ofconn *ofconn, struct ofp_packet_out *opo; struct ofpbuf payload, *buffer; struct odp_actions actions; + struct flow flow; int n_actions; uint16_t in_port; - flow_t flow; int error; error = reject_slave_controller(ofconn, oh); @@ -2942,8 +2951,7 @@ handle_packet_out(struct ofproto *p, struct ofconn *ofconn, return error; } - dpif_execute(p->dpif, flow.in_port, actions.actions, actions.n_actions, - &payload); + dpif_execute(p->dpif, actions.actions, actions.n_actions, &payload); ofpbuf_delete(buffer); return 0; @@ -2961,17 +2969,14 @@ update_port_config(struct ofproto *p, struct ofport *port, netdev_turn_flags_on(port->netdev, NETDEV_UP, true); } } -#define 
REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD) +#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | \ + OFPPC_NO_FWD | OFPPC_NO_FLOOD) if (mask & REVALIDATE_BITS) { COVERAGE_INC(ofproto_costly_flags); port->opp.config ^= mask & REVALIDATE_BITS; p->need_revalidate = true; } #undef REVALIDATE_BITS - if (mask & OFPPC_NO_FLOOD) { - port->opp.config ^= OFPPC_NO_FLOOD; - refresh_port_groups(p); - } if (mask & OFPPC_NO_PACKET_IN) { port->opp.config ^= OFPPC_NO_PACKET_IN; } @@ -3062,17 +3067,6 @@ handle_desc_stats_request(struct ofproto *p, struct ofconn *ofconn, return 0; } -static void -count_subrules(struct cls_rule *cls_rule, void *n_subrules_) -{ - struct rule *rule = rule_from_cls_rule(cls_rule); - int *n_subrules = n_subrules_; - - if (rule->super) { - (*n_subrules)++; - } -} - static int handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn, struct ofp_stats_request *request) @@ -3081,12 +3075,17 @@ handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn, struct ofpbuf *msg; struct odp_stats dpstats; int n_exact, n_subrules, n_wild; + struct rule *rule; msg = start_stats_reply(request, sizeof *ots * 2); /* Count rules of various kinds. */ n_subrules = 0; - classifier_for_each(&p->cls, CLS_INC_EXACT, count_subrules, &n_subrules); + CLASSIFIER_FOR_EACH_EXACT_RULE (rule, cr, &p->cls) { + if (rule->super) { + n_subrules++; + } + } n_exact = classifier_count_exact(&p->cls) - n_subrules; n_wild = classifier_count(&p->cls) - classifier_count_exact(&p->cls); @@ -3216,12 +3215,12 @@ query_stats(struct ofproto *p, struct rule *rule, if (rule->cr.wc.wildcards) { size_t i = 0; LIST_FOR_EACH (subrule, list, &rule->list) { - odp_flows[i++].key = subrule->cr.flow; + odp_flow_key_from_flow(&odp_flows[i++].key, &subrule->cr.flow); packet_count += subrule->packet_count; byte_count += subrule->byte_count; } } else { - odp_flows[0].key = rule->cr.flow; + odp_flow_key_from_flow(&odp_flows[0].key, &rule->cr.flow); } /* Fetch up-to-date statistics from the datapath and add them in. 
*/ @@ -3349,6 +3348,8 @@ flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_) ofp_print_match(results, &match, true); if (act_len > 0) { ofp_print_actions(results, &rule->actions->header, act_len); + } else { + ds_put_cstr(results, "drop"); } ds_put_cstr(results, "\n"); } @@ -3617,7 +3618,7 @@ add_flow(struct ofproto *p, struct ofconn *ofconn, int error; if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)) { - flow_t flow; + struct flow flow; uint32_t wildcards; flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie, @@ -3652,7 +3653,7 @@ static struct rule * find_flow_strict(struct ofproto *p, const struct ofp_flow_mod *ofm) { uint32_t wildcards; - flow_t flow; + struct flow flow; flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie, &flow, &wildcards); @@ -3667,7 +3668,7 @@ send_buffered_packet(struct ofproto *ofproto, struct ofconn *ofconn, { struct ofpbuf *packet; uint16_t in_port; - flow_t flow; + struct flow flow; int error; if (ofm->buffer_id == htonl(UINT32_MAX)) { @@ -4137,7 +4138,7 @@ handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet) struct odp_msg *msg = packet->data; struct rule *rule; struct ofpbuf payload; - flow_t flow; + struct flow flow; payload.data = msg + 1; payload.size = msg->length - sizeof *msg; @@ -4151,7 +4152,7 @@ handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet) memset(&action, 0, sizeof(action)); action.output.type = ODPAT_OUTPUT; action.output.port = ODPP_LOCAL; - dpif_execute(p->dpif, flow.in_port, &action, 1, &payload); + dpif_execute(p->dpif, &action, 1, &payload); } rule = lookup_valid_rule(p, &flow); @@ -4241,16 +4242,20 @@ handle_odp_msg(struct ofproto *p, struct ofpbuf *packet) struct expire_cbdata { struct ofproto *ofproto; + int dp_max_idle; }; +static int ofproto_dp_max_idle(const struct ofproto *); static void ofproto_update_used(struct ofproto *); static void rule_expire(struct cls_rule *, void *cbdata); /* This function is called periodically by ofproto_run(). Its job is to * collect updates for the flows that have been installed into the datapath, * most importantly when they last were used, and then use that information to - * expire flows that have not been used recently. */ -static void + * expire flows that have not been used recently. + * + * Returns the number of milliseconds after which it should be called again. */ +static int ofproto_expire(struct ofproto *ofproto) { struct expire_cbdata cbdata; @@ -4258,9 +4263,14 @@ ofproto_expire(struct ofproto *ofproto) /* Update 'used' for each flow in the datapath. */ ofproto_update_used(ofproto); - /* Expire idle flows. */ + /* Expire idle flows. + * + * A wildcarded flow is idle only when all of its subrules have expired due + * to becoming idle, so iterate through the exact-match flows first. */ cbdata.ofproto = ofproto; - classifier_for_each(&ofproto->cls, CLS_INC_ALL, rule_expire, &cbdata); + cbdata.dp_max_idle = ofproto_dp_max_idle(ofproto); + classifier_for_each(&ofproto->cls, CLS_INC_EXACT, rule_expire, &cbdata); + classifier_for_each(&ofproto->cls, CLS_INC_WILD, rule_expire, &cbdata); /* Let the hook know that we're at a stable point: all outstanding data * in existing flows has been accounted to the account_cb. Thus, the @@ -4269,6 +4279,8 @@ ofproto_expire(struct ofproto *ofproto) if (ofproto->ofhooks->account_checkpoint_cb) { ofproto->ofhooks->account_checkpoint_cb(ofproto->aux); } + + return MIN(cbdata.dp_max_idle, 1000); } /* Update 'used' member of each flow currently installed into the datapath. 
*/ @@ -4288,9 +4300,12 @@ ofproto_update_used(struct ofproto *p) for (i = 0; i < n_flows; i++) { struct odp_flow *f = &flows[i]; struct rule *rule; + struct flow flow; + + odp_flow_key_to_flow(&f->key, &flow); rule = rule_from_cls_rule( - classifier_find_rule_exactly(&p->cls, &f->key, 0, UINT16_MAX)); + classifier_find_rule_exactly(&p->cls, &flow, 0, UINT16_MAX)); if (rule && rule->installed) { update_time(p, rule, &f->stats); @@ -4306,6 +4321,96 @@ ofproto_update_used(struct ofproto *p) free(flows); } +/* Calculates and returns the number of milliseconds of idle time after which + * flows should expire from the datapath and we should fold their statistics + * into their parent rules in userspace. */ +static int +ofproto_dp_max_idle(const struct ofproto *ofproto) +{ + /* + * Idle time histogram. + * + * Most of the time a switch has a relatively small number of flows. When + * this is the case we might as well keep statistics for all of them in + * userspace and to cache them in the kernel datapath for performance as + * well. + * + * As the number of flows increases, the memory required to maintain + * statistics about them in userspace and in the kernel becomes + * significant. However, with a large number of flows it is likely that + * only a few of them are "heavy hitters" that consume a large amount of + * bandwidth. At this point, only heavy hitters are worth caching in the + * kernel and maintaining in userspaces; other flows we can discard. + * + * The technique used to compute the idle time is to build a histogram with + * N_BUCKETS bucket whose width is BUCKET_WIDTH msecs each. Each flow that + * is installed in the kernel gets dropped in the appropriate bucket. + * After the histogram has been built, we compute the cutoff so that only + * the most-recently-used 1% of flows (but at least 1000 flows) are kept + * cached. At least the most-recently-used bucket of flows is kept, so + * actually an arbitrary number of flows can be kept in any given + * expiration run (though the next run will delete most of those unless + * they receive additional data). + * + * This requires a second pass through the exact-match flows, in addition + * to the pass made by ofproto_update_used(), because the former function + * never looks at uninstallable flows. + */ + enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) }; + enum { N_BUCKETS = 5000 / BUCKET_WIDTH }; + int buckets[N_BUCKETS] = { 0 }; + int total, bucket; + struct rule *rule; + long long int now; + int i; + + total = classifier_count_exact(&ofproto->cls); + if (total <= 1000) { + return N_BUCKETS * BUCKET_WIDTH; + } + + /* Build histogram. */ + now = time_msec(); + CLASSIFIER_FOR_EACH_EXACT_RULE (rule, cr, &ofproto->cls) { + long long int idle = now - rule->used; + int bucket = (idle <= 0 ? 0 + : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1 + : (unsigned int) idle / BUCKET_WIDTH); + buckets[bucket]++; + } + + /* Find the first bucket whose flows should be expired. 
*/ + for (bucket = 0; bucket < N_BUCKETS; bucket++) { + if (buckets[bucket]) { + int subtotal = 0; + do { + subtotal += buckets[bucket++]; + } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100)); + break; + } + } + + if (VLOG_IS_DBG_ENABLED()) { + struct ds s; + + ds_init(&s); + ds_put_cstr(&s, "keep"); + for (i = 0; i < N_BUCKETS; i++) { + if (i == bucket) { + ds_put_cstr(&s, ", drop"); + } + if (buckets[i]) { + ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]); + } + } + VLOG_INFO("%s: %s (msec:count)", + dpif_name(ofproto->dpif), ds_cstr(&s)); + ds_destroy(&s); + } + + return bucket * BUCKET_WIDTH; +} + static void rule_active_timeout(struct ofproto *ofproto, struct rule *rule) { @@ -4321,7 +4426,7 @@ rule_active_timeout(struct ofproto *ofproto, struct rule *rule) * ofproto_update_used() zeroed TCP flags. */ memset(&odp_flow, 0, sizeof odp_flow); if (rule->installed) { - odp_flow.key = rule->cr.flow; + odp_flow_key_from_flow(&odp_flow.key, &rule->cr.flow); odp_flow.flags = ODPFF_ZERO_TCP_FLAGS; dpif_flow_get(ofproto->dpif, &odp_flow); @@ -4371,7 +4476,7 @@ rule_expire(struct cls_rule *cls_rule, void *cbdata_) if (now < expire) { /* 'rule' has not expired according to OpenFlow rules. */ if (!rule->cr.wc.wildcards) { - if (now >= rule->used + 5000) { + if (now >= rule->used + cbdata->dp_max_idle) { /* This rule is idle, so drop it to free up resources. */ if (rule->super) { /* It's not part of the OpenFlow flow table, so we can @@ -4431,7 +4536,7 @@ revalidate_cb(struct cls_rule *sub_, void *cbdata_) static bool revalidate_rule(struct ofproto *p, struct rule *rule) { - const flow_t *flow = &rule->cr.flow; + const struct flow *flow = &rule->cr.flow; COVERAGE_INC(ofproto_revalidate_rule); if (rule->super) { @@ -4691,7 +4796,7 @@ pick_fallback_dpid(void) } static bool -default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet, +default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, struct odp_actions *actions, tag_type *tags, uint16_t *nf_output_iface, void *ofproto_) { @@ -4722,7 +4827,8 @@ default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet, out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags, NULL); if (out_port < 0) { - add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface); + flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD, + nf_output_iface, actions); } else if (out_port != flow->in_port) { odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port; *nf_output_iface = out_port; @@ -4734,7 +4840,6 @@ default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet, } static const struct ofhooks default_ofhooks = { - NULL, default_normal_ofhook_cb, NULL, NULL
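
For reference on the OFPAT_SET_VLAN_VID / OFPAT_SET_VLAN_PCP hunks above: each
of the two OpenFlow actions now emits a single ODPAT_SET_DL_TCI datapath
action, so each handler folds the other half of the 802.1Q tag into the TCI it
writes.  A minimal standalone sketch of that packing, assuming the usual
802.1Q layout (VID in bits 0-11, PCP in bits 13-15) and restating
VLAN_PCP_SHIFT locally for illustration only:

    /* Sketch, not part of the patch: pack a host-order VLAN ID and priority
     * into a network-order 802.1Q TCI, the way the ODPAT_SET_DL_TCI handlers
     * combine ctx->flow.dl_vlan and ctx->flow.dl_vlan_pcp. */
    #include <stdint.h>
    #include <arpa/inet.h>

    #define SKETCH_VLAN_PCP_SHIFT 13   /* VLAN_PCP_SHIFT in the OVS headers. */

    static uint16_t
    sketch_make_tci(uint16_t vid, uint8_t pcp)
    {
        return htons((vid & 0x0fff)
                     | ((uint16_t) (pcp & 7) << SKETCH_VLAN_PCP_SHIFT));
    }

In the patch itself the VID arrives from OpenFlow already in network byte
order, so only the shifted PCP goes through htons() before the two halves are
OR'd together.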
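
A note on ofproto_dp_max_idle() above: with the constants in the patch,
BUCKET_WIDTH is ROUND_UP(100, TIME_UPDATE_INTERVAL), i.e. 100 ms whenever
TIME_UPDATE_INTERVAL divides 100, in which case N_BUCKETS is
5000 / BUCKET_WIDTH = 50 and a datapath with 1000 or fewer exact-match flows
always gets the full N_BUCKETS * BUCKET_WIDTH = 5000 ms -- the same threshold
that rule_expire() previously hard-coded.  The cutoff scan can be read on its
own; a standalone sketch, with MAX() written out since it normally comes from
the OVS util headers:

    /* Sketch, not part of the patch: given a histogram where buckets[i]
     * counts flows idle between i*width and (i+1)*width msec, return the
     * idle time after which flows should expire so that at least the
     * most-recently-used MAX(1000, total/100) flows are kept. */
    static int
    sketch_idle_cutoff(const int buckets[], int n_buckets, int width,
                       int total)
    {
        int keep = total / 100 > 1000 ? total / 100 : 1000;
        int bucket;

        for (bucket = 0; bucket < n_buckets; bucket++) {
            if (buckets[bucket]) {
                int subtotal = 0;
                do {
                    subtotal += buckets[bucket++];
                } while (bucket < n_buckets && subtotal < keep);
                break;
            }
        }
        return bucket * width;
    }

Because at least the first non-empty bucket is always kept in full, a single
expiration run may retain more than 1% of the flows; the comment in the patch
calls this out, and the next run will expire most of those flows unless they
see more traffic.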