X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=f757b58094019d6aed244014a0c71f4b4be01fe9;hb=1ce0a5fa82f1a0013dd62713d16fde973b029eb7;hp=a0dca356dc0979c775d613040e08dcc8ae427a32;hpb=ba25b8f41f4db5ed5c91f53b9b83b57f242a82d6;p=openvswitch diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c index a0dca356..f757b580 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -25,9 +25,9 @@ #include #include #include "byte-order.h" +#include "cfm.h" #include "classifier.h" #include "coverage.h" -#include "discovery.h" #include "dpif.h" #include "dynamic-string.h" #include "fail-open.h" @@ -54,11 +54,11 @@ #include "poll-loop.h" #include "rconn.h" #include "shash.h" -#include "status.h" #include "stream-ssl.h" #include "svec.h" #include "tag.h" #include "timeval.h" +#include "unaligned.h" #include "unixctl.h" #include "vconn.h" #include "vlog.h" @@ -90,7 +90,9 @@ COVERAGE_DEFINE(ofproto_unexpected_rule); COVERAGE_DEFINE(ofproto_uninstallable); COVERAGE_DEFINE(ofproto_update_port); -#include "sflow_api.h" +/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a + * flow translation. */ +#define MAX_RESUBMIT_RECURSION 16 struct rule; @@ -99,10 +101,12 @@ struct ofport { struct netdev *netdev; struct ofp_phy_port opp; /* In host byte order. */ uint16_t odp_port; + struct cfm *cfm; /* Connectivity Fault Management, if any. */ }; static void ofport_free(struct ofport *); -static void hton_ofp_phy_port(struct ofp_phy_port *); +static void ofport_run(struct ofproto *, struct ofport *); +static void ofport_wait(struct ofport *); struct action_xlate_ctx { /* action_xlate_ctx_init() initializes these members. */ @@ -122,7 +126,12 @@ struct action_xlate_ctx { * * This is normally null so the client has to set it manually after * calling action_xlate_ctx_init(). */ - void (*resubmit_hook)(struct action_xlate_ctx *, const struct rule *); + void (*resubmit_hook)(struct action_xlate_ctx *, struct rule *); + + /* If true, the speciality of 'flow' should be checked before executing + * its actions. If special_cb returns false on 'flow' rendered + * uninstallable and no actions will be executed. */ + bool check_special; /* xlate_actions() initializes and uses these members. The client might want * to look at them after it returns. */ @@ -138,7 +147,7 @@ struct action_xlate_ctx { int recurse; /* Recursion level, via xlate_table_action. */ int last_pop_priority; /* Offset in 'odp_actions' just past most - * recently added ODPAT_SET_PRIORITY. */ + * recent ODP_ACTION_ATTR_SET_PRIORITY. */ }; static void action_xlate_ctx_init(struct action_xlate_ctx *, @@ -194,6 +203,8 @@ static void rule_insert(struct ofproto *, struct rule *); static void rule_remove(struct ofproto *, struct rule *); static void rule_send_removed(struct ofproto *, struct rule *, uint8_t reason); +static void rule_get_stats(const struct rule *, uint64_t *packets, + uint64_t *bytes); /* An exact-match instantiation of an OpenFlow flow. */ struct facet { @@ -215,6 +226,13 @@ struct facet { uint64_t packet_count; /* Number of packets received. */ uint64_t byte_count; /* Number of bytes received. */ + uint64_t dp_packet_count; /* Last known packet count in the datapath. */ + uint64_t dp_byte_count; /* Last known byte count in the datapath. */ + + uint64_t rs_packet_count; /* Packets pushed to resubmit children. */ + uint64_t rs_byte_count; /* Bytes pushed to resubmit children. */ + long long int rs_used; /* Used time pushed to resubmit children. */ + /* Number of bytes passed to account_cb. 
This may include bytes that can * currently obtained from the datapath (thus, it can be greater than * byte_count). */ @@ -250,6 +268,7 @@ static void facet_make_actions(struct ofproto *, struct facet *, const struct ofpbuf *packet); static void facet_update_stats(struct ofproto *, struct facet *, const struct dpif_flow_stats *); +static void facet_push_stats(struct ofproto *, struct facet *); /* ofproto supports two kinds of OpenFlow connections: * @@ -301,7 +320,8 @@ struct ofconn { /* OFPT_PACKET_IN related data. */ struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */ - struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */ +#define N_SCHEDULERS 2 + struct pinsched *schedulers[N_SCHEDULERS]; struct pktbuf *pktbuf; /* OpenFlow packet buffers. */ int miss_send_len; /* Bytes to send of buffered packets. */ @@ -314,30 +334,33 @@ struct ofconn { /* type == OFCONN_PRIMARY only. */ enum nx_role role; /* Role. */ struct hmap_node hmap_node; /* In struct ofproto's "controllers" map. */ - struct discovery *discovery; /* Controller discovery object, if enabled. */ - struct status_category *ss; /* Switch status category. */ enum ofproto_band band; /* In-band or out-of-band? */ }; -/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's - * "schedulers" array. Their values are 0 and 1, and their meanings and values - * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient. In - * case anything ever changes, check their values here. */ -#define N_SCHEDULERS 2 -BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0); -BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR); -BUILD_ASSERT_DECL(OFPR_ACTION == 1); -BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR); static struct ofconn *ofconn_create(struct ofproto *, struct rconn *, enum ofconn_type); static void ofconn_destroy(struct ofconn *); static void ofconn_run(struct ofconn *); static void ofconn_wait(struct ofconn *); + static bool ofconn_receives_async_msgs(const struct ofconn *); static char *ofconn_make_name(const struct ofproto *, const char *target); static void ofconn_set_rate_limit(struct ofconn *, int rate, int burst); +static struct ofproto *ofconn_get_ofproto(struct ofconn *); + +static enum nx_flow_format ofconn_get_flow_format(struct ofconn *); +static void ofconn_set_flow_format(struct ofconn *, enum nx_flow_format); + +static int ofconn_get_miss_send_len(const struct ofconn *); +static void ofconn_set_miss_send_len(struct ofconn *, int miss_send_len); + +static enum ofconn_type ofconn_get_type(const struct ofconn *); + +static enum nx_role ofconn_get_role(const struct ofconn *); +static void ofconn_set_role(struct ofconn *, enum nx_role); + static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn, struct rconn_packet_counter *counter); @@ -363,7 +386,6 @@ struct ofproto { uint32_t max_ports; /* Configuration. 
*/ - struct switch_status *switch_status; struct fail_open *fail_open; struct netflow *netflow; struct ofproto_sflow *sflow; @@ -413,6 +435,9 @@ static uint64_t pick_datapath_id(const struct ofproto *); static uint64_t pick_fallback_dpid(void); static int ofproto_expire(struct ofproto *); +static void flow_push_stats(struct ofproto *, const struct rule *, + struct flow *, uint64_t packets, uint64_t bytes, + long long int used); static void handle_upcall(struct ofproto *, struct dpif_upcall *); @@ -444,7 +469,10 @@ ofproto_create(const char *datapath, const char *datapath_type, VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error)); return error; } - error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW); + error = dpif_recv_set_mask(dpif, + ((1u << DPIF_UC_MISS) | + (1u << DPIF_UC_ACTION) | + (1u << DPIF_UC_SAMPLE))); if (error) { VLOG_ERR("failed to listen on datapath %s: %s", datapath, strerror(error)); @@ -472,7 +500,6 @@ ofproto_create(const char *datapath, const char *datapath_type, p->max_ports = dpif_get_max_ports(dpif); /* Initialize submodules. */ - p->switch_status = switch_status_create(p); p->fail_open = NULL; p->netflow = NULL; p->sflow = NULL; @@ -532,82 +559,47 @@ ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id) } } -static bool -is_discovery_controller(const struct ofproto_controller *c) -{ - return !strcmp(c->target, "discover"); -} - -static bool -is_in_band_controller(const struct ofproto_controller *c) -{ - return is_discovery_controller(c) || c->band == OFPROTO_IN_BAND; -} - /* Creates a new controller in 'ofproto'. Some of the settings are initially * drawn from 'c', but update_controller() needs to be called later to finish * the new ofconn's configuration. */ static void add_controller(struct ofproto *ofproto, const struct ofproto_controller *c) { - struct discovery *discovery; + char *name = ofconn_make_name(ofproto, c->target); struct ofconn *ofconn; - if (is_discovery_controller(c)) { - int error = discovery_create(c->accept_re, c->update_resolv_conf, - ofproto->dpif, ofproto->switch_status, - &discovery); - if (error) { - return; - } - } else { - discovery = NULL; - } - ofconn = ofconn_create(ofproto, rconn_create(5, 8), OFCONN_PRIMARY); ofconn->pktbuf = pktbuf_create(); ofconn->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN; - if (discovery) { - ofconn->discovery = discovery; - } else { - char *name = ofconn_make_name(ofproto, c->target); - rconn_connect(ofconn->rconn, c->target, name); - free(name); - } + rconn_connect(ofconn->rconn, c->target, name); hmap_insert(&ofproto->controllers, &ofconn->hmap_node, hash_string(c->target, 0)); + + free(name); } /* Reconfigures 'ofconn' to match 'c'. This function cannot update an ofconn's - * target or turn discovery on or off (these are done by creating new ofconns - * and deleting old ones), but it can update the rest of an ofconn's - * settings. */ + * target (this is done by creating new ofconns and deleting old ones), but it + * can update the rest of an ofconn's settings. */ static void update_controller(struct ofconn *ofconn, const struct ofproto_controller *c) { int probe_interval; - ofconn->band = (is_in_band_controller(c) - ? OFPROTO_IN_BAND : OFPROTO_OUT_OF_BAND); + ofconn->band = c->band; rconn_set_max_backoff(ofconn->rconn, c->max_backoff); probe_interval = c->probe_interval ? 
MAX(c->probe_interval, 5) : 0; rconn_set_probe_interval(ofconn->rconn, probe_interval); - if (ofconn->discovery) { - discovery_set_update_resolv_conf(ofconn->discovery, - c->update_resolv_conf); - discovery_set_accept_controller_re(ofconn->discovery, c->accept_re); - } - ofconn_set_rate_limit(ofconn, c->rate_limit, c->burst_limit); } static const char * ofconn_get_target(const struct ofconn *ofconn) { - return ofconn->discovery ? "discover" : rconn_get_target(ofconn->rconn); + return rconn_get_target(ofconn->rconn); } static struct ofconn * @@ -630,7 +622,6 @@ update_in_band_remotes(struct ofproto *ofproto) const struct ofconn *ofconn; struct sockaddr_in *addrs; size_t max_addrs, n_addrs; - bool discovery; size_t i; /* Allocate enough memory for as many remotes as we could possibly have. */ @@ -639,7 +630,6 @@ update_in_band_remotes(struct ofproto *ofproto) n_addrs = 0; /* Add all the remotes. */ - discovery = false; HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) { struct sockaddr_in *sin = &addrs[n_addrs]; @@ -652,23 +642,15 @@ update_in_band_remotes(struct ofproto *ofproto) sin->sin_port = rconn_get_remote_port(ofconn->rconn); n_addrs++; } - if (ofconn->discovery) { - discovery = true; - } } for (i = 0; i < ofproto->n_extra_remotes; i++) { addrs[n_addrs++] = ofproto->extra_in_band_remotes[i]; } - /* Create or update or destroy in-band. - * - * Ordinarily we only enable in-band if there's at least one remote - * address, but discovery needs the in-band rules for DHCP to be installed - * even before we know any remote addresses. */ - if (n_addrs || discovery) { + /* Create or update or destroy in-band. */ + if (n_addrs) { if (!ofproto->in_band) { - in_band_create(ofproto, ofproto->dpif, ofproto->switch_status, - &ofproto->in_band); + in_band_create(ofproto, ofproto->dpif, &ofproto->in_band); } if (ofproto->in_band) { in_band_set_remotes(ofproto->in_band, addrs, n_addrs); @@ -695,7 +677,7 @@ update_fail_open(struct ofproto *p) size_t n; if (!p->fail_open) { - p->fail_open = fail_open_create(p, p->switch_status); + p->fail_open = fail_open_create(p); } n = 0; @@ -720,7 +702,6 @@ ofproto_set_controllers(struct ofproto *p, struct shash new_controllers; struct ofconn *ofconn, *next_ofconn; struct ofservice *ofservice, *next_ofservice; - bool ss_exists; size_t i; /* Create newly configured controllers and services. @@ -729,7 +710,7 @@ ofproto_set_controllers(struct ofproto *p, for (i = 0; i < n_controllers; i++) { const struct ofproto_controller *c = &controllers[i]; - if (!vconn_verify_name(c->target) || !strcmp(c->target, "discover")) { + if (!vconn_verify_name(c->target)) { if (!find_controller_by_target(p, c->target)) { add_controller(p, c); } @@ -748,7 +729,6 @@ ofproto_set_controllers(struct ofproto *p, /* Delete controllers that are no longer configured. * Update configuration of all now-existing controllers. 
*/ - ss_exists = false; HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &p->controllers) { struct ofproto_controller *c; @@ -757,9 +737,6 @@ ofproto_set_controllers(struct ofproto *p, ofconn_destroy(ofconn); } else { update_controller(ofconn, c); - if (ofconn->ss) { - ss_exists = true; - } } } @@ -781,13 +758,6 @@ ofproto_set_controllers(struct ofproto *p, update_in_band_remotes(p); update_fail_open(p); - - if (!hmap_is_empty(&p->controllers) && !ss_exists) { - ofconn = CONTAINER_OF(hmap_first(&p->controllers), - struct ofconn, hmap_node); - ofconn->ss = switch_status_register(p->switch_status, "remote", - rconn_status_cb, ofconn->rconn); - } } void @@ -994,7 +964,70 @@ ofproto_set_sflow(struct ofproto *ofproto, ofproto->sflow = NULL; } } + +/* Connectivity Fault Management configuration. */ + +/* Clears the CFM configuration from 'port_no' on 'ofproto'. */ +void +ofproto_iface_clear_cfm(struct ofproto *ofproto, uint32_t port_no) +{ + struct ofport *ofport = get_port(ofproto, port_no); + if (ofport && ofport->cfm){ + cfm_destroy(ofport->cfm); + ofport->cfm = NULL; + } +} + +/* Configures connectivity fault management on 'port_no' in 'ofproto'. Takes + * basic configuration from the configuration members in 'cfm', and the set of + * remote maintenance points from the 'n_remote_mps' elements in 'remote_mps'. + * Ignores the statistics members of 'cfm'. + * + * This function has no effect if 'ofproto' does not have a port 'port_no'. */ +void +ofproto_iface_set_cfm(struct ofproto *ofproto, uint32_t port_no, + const struct cfm *cfm, + const uint16_t *remote_mps, size_t n_remote_mps) +{ + struct ofport *ofport; + + ofport = get_port(ofproto, port_no); + if (!ofport) { + VLOG_WARN("%s: cannot configure CFM on nonexistent port %"PRIu32, + dpif_name(ofproto->dpif), port_no); + return; + } + + if (!ofport->cfm) { + ofport->cfm = cfm_create(); + } + + ofport->cfm->mpid = cfm->mpid; + ofport->cfm->interval = cfm->interval; + memcpy(ofport->cfm->maid, cfm->maid, CCM_MAID_LEN); + + cfm_update_remote_mps(ofport->cfm, remote_mps, n_remote_mps); + + if (!cfm_configure(ofport->cfm)) { + VLOG_WARN("%s: CFM configuration on port %"PRIu32" (%s) failed", + dpif_name(ofproto->dpif), port_no, + netdev_get_name(ofport->netdev)); + cfm_destroy(ofport->cfm); + ofport->cfm = NULL; + } +} +/* Returns the connectivity fault management object associated with 'port_no' + * within 'ofproto', or a null pointer if 'ofproto' does not have a port + * 'port_no' or if that port does not have CFM configured. The caller must not + * modify or destroy the returned object. */ +const struct cfm * +ofproto_iface_get_cfm(struct ofproto *ofproto, uint32_t port_no) +{ + struct ofport *ofport = get_port(ofproto, port_no); + return ofport ? ofport->cfm : NULL; +} + uint64_t ofproto_get_datapath_id(const struct ofproto *ofproto) { @@ -1062,7 +1095,6 @@ ofproto_destroy(struct ofproto *p) } shash_destroy(&p->port_by_name); - switch_status_destroy(p->switch_status); netflow_destroy(p->netflow); ofproto_sflow_destroy(p->sflow); @@ -1116,7 +1148,7 @@ process_port_change(struct ofproto *ofproto, int error, char *devname) static int snoop_preference(const struct ofconn *ofconn) { - switch (ofconn->role) { + switch (ofconn_get_role(ofconn)) { case NX_ROLE_MASTER: return 3; case NX_ROLE_OTHER: @@ -1139,7 +1171,7 @@ add_snooper(struct ofproto *ofproto, struct vconn *vconn) /* Pick a controller for monitoring. 
*/ best = NULL; LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) { - if (ofconn->type == OFCONN_PRIMARY + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY && (!best || snoop_preference(ofconn) > snoop_preference(best))) { best = ofconn; } @@ -1158,6 +1190,7 @@ ofproto_run1(struct ofproto *p) { struct ofconn *ofconn, *next_ofconn; struct ofservice *ofservice; + struct ofport *ofport; char *devname; int error; int i; @@ -1194,6 +1227,10 @@ ofproto_run1(struct ofproto *p) process_port_change(p, error, devname); } + HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { + ofport_run(p, ofport); + } + if (p->in_band) { if (time_msec() >= p->next_in_band_update) { update_in_band_remotes(p); @@ -1294,11 +1331,15 @@ ofproto_wait(struct ofproto *p) { struct ofservice *ofservice; struct ofconn *ofconn; + struct ofport *ofport; size_t i; dpif_recv_wait(p->dpif); dpif_port_poll_wait(p->dpif); netdev_monitor_poll_wait(p->netdev_monitor); + HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { + ofport_wait(ofport); + } LIST_FOR_EACH (ofconn, node, &p->all_conns) { ofconn_wait(ofconn); } @@ -1349,7 +1390,7 @@ ofproto_is_alive(const struct ofproto *p) } void -ofproto_get_ofproto_controller_info(const struct ofproto * ofproto, +ofproto_get_ofproto_controller_info(const struct ofproto *ofproto, struct shash *info) { const struct ofconn *ofconn; @@ -1358,32 +1399,40 @@ ofproto_get_ofproto_controller_info(const struct ofproto * ofproto, HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) { const struct rconn *rconn = ofconn->rconn; + time_t now = time_now(); + time_t last_connection = rconn_get_last_connection(rconn); + time_t last_disconnect = rconn_get_last_disconnect(rconn); const int last_error = rconn_get_last_error(rconn); struct ofproto_controller_info *cinfo = xmalloc(sizeof *cinfo); shash_add(info, rconn_get_target(rconn), cinfo); cinfo->is_connected = rconn_is_connected(rconn); - cinfo->role = ofconn->role; + cinfo->role = ofconn_get_role(ofconn); cinfo->pairs.n = 0; - if (last_error == EOF) { - cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; - cinfo->pairs.values[cinfo->pairs.n++] = xstrdup("End of file"); - } else if (last_error > 0) { + if (last_error) { cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; cinfo->pairs.values[cinfo->pairs.n++] = - xstrdup(strerror(last_error)); + xstrdup(ovs_retval_to_string(last_error)); } cinfo->pairs.keys[cinfo->pairs.n] = "state"; cinfo->pairs.values[cinfo->pairs.n++] = xstrdup(rconn_get_state(rconn)); - cinfo->pairs.keys[cinfo->pairs.n] = "time_in_state"; - cinfo->pairs.values[cinfo->pairs.n++] = - xasprintf("%u", rconn_get_state_elapsed(rconn)); + if (last_connection != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_connect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_connection)); + } + + if (last_disconnect != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_disconnect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_disconnect)); + } } } @@ -1444,24 +1493,34 @@ ofproto_port_is_floodable(struct ofproto *ofproto, uint16_t odp_port) return ofport && !(ofport->opp.config & OFPPC_NO_FLOOD); } +/* Sends 'packet' out of port 'port_no' within 'p'. If 'vlan_tci' is zero the + * packet will not have any 802.1Q hader; if it is nonzero, then the packet + * will be sent with the VLAN TCI specified by 'vlan_tci & ~VLAN_CFI'. + * + * Returns 0 if successful, otherwise a positive errno value. 
*/ int -ofproto_send_packet(struct ofproto *p, const struct flow *flow, - const union ofp_action *actions, size_t n_actions, +ofproto_send_packet(struct ofproto *ofproto, + uint32_t port_no, uint16_t vlan_tci, const struct ofpbuf *packet) { - struct action_xlate_ctx ctx; - struct ofpbuf *odp_actions; - - action_xlate_ctx_init(&ctx, p, flow, packet); - odp_actions = xlate_actions(&ctx, actions, n_actions); - - /* XXX Should we translate the dpif_execute() errno value into an OpenFlow - * error code? */ - dpif_execute(p->dpif, odp_actions->data, odp_actions->size, packet); + struct ofpbuf odp_actions; + int error; - ofpbuf_delete(odp_actions); + ofpbuf_init(&odp_actions, 32); + if (vlan_tci != 0) { + nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_SET_DL_TCI, + ntohs(vlan_tci & ~VLAN_CFI)); + } + nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_OUTPUT, port_no); + error = dpif_execute(ofproto->dpif, odp_actions.data, odp_actions.size, + packet); + ofpbuf_uninit(&odp_actions); - return 0; + if (error) { + VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)", + dpif_name(ofproto->dpif), port_no, strerror(error)); + } + return error; } /* Adds a flow to the OpenFlow flow table in 'p' that matches 'cls_rule' and @@ -1509,6 +1568,8 @@ ofproto_flush_flows(struct ofproto *ofproto) * individually since we are about to blow away all the facets with * dpif_flow_flush(). */ facet->installed = false; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; facet_remove(ofproto, facet); } @@ -1574,7 +1635,7 @@ make_ofport(const struct dpif_port *dpif_port) return NULL; } - ofport = xmalloc(sizeof *ofport); + ofport = xzalloc(sizeof *ofport); ofport->netdev = netdev; ofport->odp_port = dpif_port->port_no; ofport->opp.port_no = odp_port_to_ofp_port(dpif_port->port_no); @@ -1638,7 +1699,7 @@ send_port_status(struct ofproto *p, const struct ofport *ofport, /* Primary controllers, even slaves, should always get port status updates. Otherwise obey ofconn_receives_async_msgs(). 
*/ - if (ofconn->type != OFCONN_PRIMARY + if (ofconn_get_type(ofconn) != OFCONN_PRIMARY && !ofconn_receives_async_msgs(ofconn)) { continue; } @@ -1676,10 +1737,39 @@ ofport_remove(struct ofproto *p, struct ofport *ofport) } } +static void +ofport_run(struct ofproto *ofproto, struct ofport *ofport) +{ + if (ofport->cfm) { + cfm_run(ofport->cfm); + + if (cfm_should_send_ccm(ofport->cfm)) { + struct ofpbuf packet; + struct ccm *ccm; + + ofpbuf_init(&packet, 0); + ccm = compose_packet(&packet, eth_addr_ccm, ofport->opp.hw_addr, + ETH_TYPE_CFM, sizeof *ccm); + cfm_compose_ccm(ofport->cfm, ccm); + ofproto_send_packet(ofproto, ofport->odp_port, 0, &packet); + ofpbuf_uninit(&packet); + } + } +} + +static void +ofport_wait(struct ofport *ofport) +{ + if (ofport->cfm) { + cfm_wait(ofport->cfm); + } +} + static void ofport_free(struct ofport *ofport) { if (ofport) { + cfm_destroy(ofport->cfm); netdev_close(ofport->netdev); free(ofport); } @@ -1809,13 +1899,13 @@ ofconn_create(struct ofproto *p, struct rconn *rconn, enum ofconn_type type) static void ofconn_destroy(struct ofconn *ofconn) { - if (ofconn->type == OFCONN_PRIMARY) { - hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node); + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); + + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY) { + hmap_remove(&ofproto->controllers, &ofconn->hmap_node); } - discovery_destroy(ofconn->discovery); list_remove(&ofconn->node); - switch_status_unregister(ofconn->ss); rconn_destroy(ofconn->rconn); rconn_packet_counter_destroy(ofconn->packet_in_counter); rconn_packet_counter_destroy(ofconn->reply_counter); @@ -1826,26 +1916,10 @@ ofconn_destroy(struct ofconn *ofconn) static void ofconn_run(struct ofconn *ofconn) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); int iteration; size_t i; - if (ofconn->discovery) { - char *controller_name; - if (rconn_is_connectivity_questionable(ofconn->rconn)) { - discovery_question_connectivity(ofconn->discovery); - } - if (discovery_run(ofconn->discovery, &controller_name)) { - if (controller_name) { - char *ofconn_name = ofconn_make_name(p, controller_name); - rconn_connect(ofconn->rconn, controller_name, ofconn_name); - free(ofconn_name); - } else { - rconn_disconnect(ofconn->rconn); - } - } - } - for (i = 0; i < N_SCHEDULERS; i++) { pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn); } @@ -1868,7 +1942,7 @@ ofconn_run(struct ofconn *ofconn) } } - if (!ofconn->discovery && !rconn_is_alive(ofconn->rconn)) { + if (!rconn_is_alive(ofconn->rconn)) { ofconn_destroy(ofconn); } } @@ -1878,9 +1952,6 @@ ofconn_wait(struct ofconn *ofconn) { int i; - if (ofconn->discovery) { - discovery_wait(ofconn->discovery); - } for (i = 0; i < N_SCHEDULERS; i++) { pinsched_wait(ofconn->schedulers[i]); } @@ -1896,10 +1967,10 @@ ofconn_wait(struct ofconn *ofconn) static bool ofconn_receives_async_msgs(const struct ofconn *ofconn) { - if (ofconn->type == OFCONN_PRIMARY) { + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY) { /* Primary controllers always get asynchronous messages unless they * have configured themselves as "slaves". */ - return ofconn->role != NX_ROLE_SLAVE; + return ofconn_get_role(ofconn) != NX_ROLE_SLAVE; } else { /* Service connections don't get asynchronous messages unless they have * explicitly asked for them by setting a nonzero miss send length. 
*/ @@ -1929,8 +2000,7 @@ ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst) if (rate > 0) { if (!*s) { - *s = pinsched_create(rate, burst, - ofconn->ofproto->switch_status); + *s = pinsched_create(rate, burst); } else { pinsched_set_limits(*s, rate, burst); } @@ -1940,6 +2010,54 @@ ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst) } } } + +static struct ofproto * +ofconn_get_ofproto(struct ofconn *ofconn) +{ + return ofconn->ofproto; +} + +static enum nx_flow_format +ofconn_get_flow_format(struct ofconn *ofconn) +{ + return ofconn->flow_format; +} + +static void +ofconn_set_flow_format(struct ofconn *ofconn, enum nx_flow_format flow_format) +{ + ofconn->flow_format = flow_format; +} + +static int +ofconn_get_miss_send_len(const struct ofconn *ofconn) +{ + return ofconn->miss_send_len; +} + +static void +ofconn_set_miss_send_len(struct ofconn *ofconn, int miss_send_len) +{ + ofconn->miss_send_len = miss_send_len; +} + +static enum ofconn_type +ofconn_get_type(const struct ofconn *ofconn) +{ + return ofconn->type; +} + +static enum nx_role +ofconn_get_role(const struct ofconn *ofconn) +{ + return ofconn->role; +} + +static void +ofconn_set_role(struct ofconn *ofconn, enum nx_role role) +{ + ofconn->role = role; +} static void ofservice_reconfigure(struct ofservice *ofservice, @@ -2093,13 +2211,13 @@ execute_odp_actions(struct ofproto *ofproto, const struct flow *flow, struct ofpbuf *packet) { if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)) - && odp_actions->nla_type == ODPAT_CONTROLLER) { + && odp_actions->nla_type == ODP_ACTION_ATTR_CONTROLLER) { /* As an optimization, avoid a round-trip from userspace to kernel to * userspace. This also avoids possibly filling up kernel packet * buffers along the way. */ struct dpif_upcall upcall; - upcall.type = _ODPL_ACTION_NR; + upcall.type = DPIF_UC_ACTION; upcall.packet = packet; upcall.key = NULL; upcall.key_len = 0; @@ -2141,12 +2259,10 @@ facet_execute(struct ofproto *ofproto, struct facet *facet, assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in)); flow_extract_stats(&facet->flow, packet, &stats); + stats.used = time_msec(); if (execute_odp_actions(ofproto, &facet->flow, facet->actions, facet->actions_len, packet)) { facet_update_stats(ofproto, facet, &stats); - facet->used = time_msec(); - netflow_flow_update_time(ofproto->netflow, - &facet->nf_flow, facet->used); } } @@ -2198,6 +2314,7 @@ rule_execute(struct ofproto *ofproto, struct rule *rule, uint16_t in_port, rule->used = time_msec(); rule->packet_count++; rule->byte_count += size; + flow_push_stats(ofproto, rule, &flow, 1, size, rule->used); } ofpbuf_delete(odp_actions); } @@ -2308,17 +2425,26 @@ facet_make_actions(struct ofproto *p, struct facet *facet, static int facet_put__(struct ofproto *ofproto, struct facet *facet, - enum dpif_flow_put_flags flags) + const struct nlattr *actions, size_t actions_len, + struct dpif_flow_stats *stats) { uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; + enum dpif_flow_put_flags flags; struct ofpbuf key; + flags = DPIF_FP_CREATE | DPIF_FP_MODIFY; + if (stats) { + flags |= DPIF_FP_ZERO_STATS; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; + } + ofpbuf_use_stack(&key, keybuf, sizeof keybuf); odp_flow_key_from_flow(&key, &facet->flow); assert(key.base == keybuf); return dpif_flow_put(ofproto->dpif, flags, key.data, key.size, - facet->actions, facet->actions_len, NULL); + actions, actions_len, stats); } /* If 'facet' is installable, inserts or re-inserts it into 'p''s datapath. 
If @@ -2327,14 +2453,12 @@ facet_put__(struct ofproto *ofproto, struct facet *facet, static void facet_install(struct ofproto *p, struct facet *facet, bool zero_stats) { - if (facet->may_install) { - enum dpif_flow_put_flags flags = DPIF_FP_CREATE | DPIF_FP_MODIFY; - if (zero_stats) { - flags |= DPIF_FP_ZERO_STATS; - } - if (!facet_put__(p, facet, flags)) { - facet->installed = true; - } + struct dpif_flow_stats stats; + + if (facet->may_install + && !facet_put__(p, facet, facet->actions, facet->actions_len, + zero_stats ? &stats : NULL)) { + facet->installed = true; } } @@ -2373,6 +2497,11 @@ facet_uninstall(struct ofproto *p, struct facet *facet) facet_update_stats(p, facet, &stats); } facet->installed = false; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; + } else { + assert(facet->dp_packet_count == 0); + assert(facet->dp_byte_count == 0); } } @@ -2389,10 +2518,16 @@ facet_is_controller_flow(struct facet *facet) } /* Folds all of 'facet''s statistics into its rule. Also updates the - * accounting ofhook and emits a NetFlow expiration if appropriate. */ + * accounting ofhook and emits a NetFlow expiration if appropriate. All of + * 'facet''s statistics in the datapath should have been zeroed and folded into + * its packet and byte counts before this function is called. */ static void facet_flush_stats(struct ofproto *ofproto, struct facet *facet) { + assert(!facet->dp_byte_count); + assert(!facet->dp_packet_count); + + facet_push_stats(ofproto, facet); facet_account(ofproto, facet, 0); if (ofproto->netflow && !facet_is_controller_flow(facet)) { @@ -2411,6 +2546,8 @@ facet_flush_stats(struct ofproto *ofproto, struct facet *facet) * reinstalled. */ facet->packet_count = 0; facet->byte_count = 0; + facet->rs_packet_count = 0; + facet->rs_byte_count = 0; facet->accounted_bytes = 0; netflow_flow_clear(&facet->nf_flow); @@ -2499,20 +2636,12 @@ facet_revalidate(struct ofproto *ofproto, struct facet *facet) /* If the ODP actions changed or the installability changed, then we need * to talk to the datapath. 
*/ - if (actions_changed || facet->may_install != facet->installed) { - if (facet->may_install) { - uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; + if (actions_changed || ctx.may_set_up_flow != facet->installed) { + if (ctx.may_set_up_flow) { struct dpif_flow_stats stats; - struct ofpbuf key; - - ofpbuf_use_stack(&key, keybuf, sizeof keybuf); - odp_flow_key_from_flow(&key, &facet->flow); - - dpif_flow_put(ofproto->dpif, - DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_ZERO_STATS, - key.data, key.size, - odp_actions->data, odp_actions->size, &stats); + facet_put__(ofproto, facet, + odp_actions->data, odp_actions->size, &stats); facet_update_stats(ofproto, facet, &stats); } else { facet_uninstall(ofproto, facet); @@ -2538,6 +2667,7 @@ facet_revalidate(struct ofproto *ofproto, struct facet *facet) list_push_back(&new_rule->facets, &facet->list_node); facet->rule = new_rule; facet->used = new_rule->created; + facet->rs_used = facet->used; } ofpbuf_delete(odp_actions); @@ -2555,6 +2685,12 @@ queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn, } } +static void +ofconn_send_reply(const struct ofconn *ofconn, struct ofpbuf *msg) +{ + queue_tx(msg, ofconn, ofconn->reply_counter); +} + static void send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh, int error) @@ -2562,38 +2698,27 @@ send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh, struct ofpbuf *buf = ofputil_encode_error_msg(error, oh); if (buf) { COVERAGE_INC(ofproto_error); - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); } } -static void -hton_ofp_phy_port(struct ofp_phy_port *opp) -{ - opp->port_no = htons(opp->port_no); - opp->config = htonl(opp->config); - opp->state = htonl(opp->state); - opp->curr = htonl(opp->curr); - opp->advertised = htonl(opp->advertised); - opp->supported = htonl(opp->supported); - opp->peer = htonl(opp->peer); -} - static int handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh) { - queue_tx(make_echo_reply(oh), ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, make_echo_reply(oh)); return 0; } static int handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofp_switch_features *osf; struct ofpbuf *buf; struct ofport *port; osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf); - osf->datapath_id = htonll(ofconn->ofproto->datapath_id); + osf->datapath_id = htonll(ofproto->datapath_id); osf->n_buffers = htonl(pktbuf_capacity()); osf->n_tables = 2; osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS | @@ -2611,31 +2736,32 @@ handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh) (1u << OFPAT_SET_TP_DST) | (1u << OFPAT_ENQUEUE)); - HMAP_FOR_EACH (port, hmap_node, &ofconn->ofproto->ports) { + HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) { hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp)); } - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); return 0; } static int handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofpbuf *buf; struct ofp_switch_config *osc; uint16_t flags; bool drop_frags; /* Figure out flags. */ - dpif_get_drop_frags(ofconn->ofproto->dpif, &drop_frags); + dpif_get_drop_frags(ofproto->dpif, &drop_frags); flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL; /* Send reply. 
*/ osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf); osc->flags = htons(flags); - osc->miss_send_len = htons(ofconn->miss_send_len); - queue_tx(buf, ofconn, ofconn->reply_counter); + osc->miss_send_len = htons(ofconn_get_miss_send_len(ofconn)); + ofconn_send_reply(ofconn, buf); return 0; } @@ -2643,15 +2769,17 @@ handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh) static int handle_set_config(struct ofconn *ofconn, const struct ofp_switch_config *osc) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); uint16_t flags = ntohs(osc->flags); - if (ofconn->type == OFCONN_PRIMARY && ofconn->role != NX_ROLE_SLAVE) { + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY + && ofconn_get_role(ofconn) != NX_ROLE_SLAVE) { switch (flags & OFPC_FRAG_MASK) { case OFPC_FRAG_NORMAL: - dpif_set_drop_frags(ofconn->ofproto->dpif, false); + dpif_set_drop_frags(ofproto->dpif, false); break; case OFPC_FRAG_DROP: - dpif_set_drop_frags(ofconn->ofproto->dpif, true); + dpif_set_drop_frags(ofproto->dpif, true); break; default: VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")", @@ -2660,15 +2788,11 @@ handle_set_config(struct ofconn *ofconn, const struct ofp_switch_config *osc) } } - ofconn->miss_send_len = ntohs(osc->miss_send_len); + ofconn_set_miss_send_len(ofconn, ntohs(osc->miss_send_len)); return 0; } -/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a - * flow translation. */ -#define MAX_RESUBMIT_RECURSION 16 - static void do_xlate_actions(const union ofp_action *in, size_t n_in, struct action_xlate_ctx *ctx); @@ -2690,7 +2814,7 @@ add_output_action(struct action_xlate_ctx *ctx, uint16_t port) */ } - nl_msg_put_u32(ctx->odp_actions, ODPAT_OUTPUT, port); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_OUTPUT, port); ctx->nf_output_iface = port; } @@ -2741,7 +2865,7 @@ flood_packets(struct ofproto *ofproto, uint16_t odp_in_port, uint32_t mask, HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) { uint16_t odp_port = ofport->odp_port; if (odp_port != odp_in_port && !(ofport->opp.config & mask)) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, odp_port); + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, odp_port); } } *nf_output_iface = NF_OUT_FLOOD; @@ -2781,7 +2905,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, &ctx->nf_output_iface, ctx->odp_actions); break; case OFPP_CONTROLLER: - nl_msg_put_u64(ctx->odp_actions, ODPAT_CONTROLLER, max_len); + nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_CONTROLLER, max_len); break; case OFPP_LOCAL: add_output_action(ctx, ODPP_LOCAL); @@ -2828,7 +2952,7 @@ static void add_pop_action(struct action_xlate_ctx *ctx) { if (ctx->odp_actions->size != ctx->last_pop_priority) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_POP_PRIORITY); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_POP_PRIORITY); ctx->last_pop_priority = ctx->odp_actions->size; } } @@ -2859,7 +2983,7 @@ xlate_enqueue_action(struct action_xlate_ctx *ctx, /* Add ODP actions. 
*/ remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); add_output_action(ctx, odp_port); add_pop_action(ctx); @@ -2887,7 +3011,7 @@ xlate_set_queue_action(struct action_xlate_ctx *ctx, } remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); } static void @@ -2895,9 +3019,9 @@ xlate_set_dl_tci(struct action_xlate_ctx *ctx) { ovs_be16 tci = ctx->flow.vlan_tci; if (!(tci & htons(VLAN_CFI))) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_STRIP_VLAN); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_STRIP_VLAN); } else { - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_DL_TCI, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_TCI, tci & ~htons(VLAN_CFI)); } } @@ -2923,7 +3047,8 @@ update_reg_state(struct action_xlate_ctx *ctx, xlate_set_dl_tci(ctx); } if (ctx->flow.tun_id != state->tun_id) { - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, ctx->flow.tun_id); + nl_msg_put_be64(ctx->odp_actions, + ODP_ACTION_ATTR_SET_TUNNEL, ctx->flow.tun_id); } } @@ -2949,13 +3074,14 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, case NXAST_SET_TUNNEL: nast = (const struct nx_action_set_tunnel *) nah; tun_id = htonll(ntohl(nast->tun_id)); - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id); + nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); ctx->flow.tun_id = tun_id; break; case NXAST_DROP_SPOOFED_ARP: if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_DROP_SPOOFED_ARP); + nl_msg_put_flag(ctx->odp_actions, + ODP_ACTION_ATTR_DROP_SPOOFED_ARP); } break; @@ -2988,7 +3114,7 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, case NXAST_SET_TUNNEL64: tun_id = ((const struct nx_action_set_tunnel64 *) nah)->tun_id; - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id); + nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); ctx->flow.tun_id = tun_id; break; @@ -3052,44 +3178,44 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, case OFPAT_SET_DL_SRC: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_SRC, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_SRC, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_DL_DST: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_DST, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_DST, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_NW_SRC: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_SRC, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_SRC, ia->nw_addr.nw_addr); ctx->flow.nw_src = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_DST: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_DST, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_DST, ia->nw_addr.nw_addr); ctx->flow.nw_dst = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_TOS: - nl_msg_put_u8(ctx->odp_actions, ODPAT_SET_NW_TOS, + nl_msg_put_u8(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_TOS, ia->nw_tos.nw_tos); ctx->flow.nw_tos = ia->nw_tos.nw_tos; break; case OFPAT_SET_TP_SRC: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_SRC, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_SRC, ia->tp_port.tp_port); ctx->flow.tp_src = 
ia->tp_port.tp_port; break; case OFPAT_SET_TP_DST: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_DST, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_DST, ia->tp_port.tp_port); ctx->flow.tp_dst = ia->tp_port.tp_port; break; @@ -3118,6 +3244,19 @@ action_xlate_ctx_init(struct action_xlate_ctx *ctx, ctx->flow = *flow; ctx->packet = packet; ctx->resubmit_hook = NULL; + ctx->check_special = true; +} + +static void +ofproto_process_cfm(struct ofproto *ofproto, const struct flow *flow, + const struct ofpbuf *packet) +{ + struct ofport *ofport; + + ofport = get_port(ofproto, flow->in_port); + if (ofport && ofport->cfm) { + cfm_process_heartbeat(ofport->cfm, packet); + } } static struct ofpbuf * @@ -3132,7 +3271,21 @@ xlate_actions(struct action_xlate_ctx *ctx, ctx->nf_output_iface = NF_OUT_DROP; ctx->recurse = 0; ctx->last_pop_priority = -1; - do_xlate_actions(in, n_in, ctx); + + if (ctx->check_special && cfm_should_process_flow(&ctx->flow)) { + if (ctx->packet) { + ofproto_process_cfm(ctx->ofproto, &ctx->flow, ctx->packet); + } + ctx->may_set_up_flow = false; + } else if (ctx->check_special + && ctx->ofproto->ofhooks->special_cb + && !ctx->ofproto->ofhooks->special_cb(&ctx->flow, ctx->packet, + ctx->ofproto->aux)) { + ctx->may_set_up_flow = false; + } else { + do_xlate_actions(in, n_in, ctx); + } + remove_pop_action(ctx); /* Check with in-band control to see if we're allowed to set up this @@ -3153,7 +3306,8 @@ xlate_actions(struct action_xlate_ctx *ctx, static int reject_slave_controller(struct ofconn *ofconn, const const char *msg_type) { - if (ofconn->type == OFCONN_PRIMARY && ofconn->role == NX_ROLE_SLAVE) { + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY + && ofconn_get_role(ofconn) == NX_ROLE_SLAVE) { static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5); VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller", msg_type); @@ -3167,7 +3321,7 @@ reject_slave_controller(struct ofconn *ofconn, const const char *msg_type) static int handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofp_packet_out *opo; struct ofpbuf payload, *buffer; union ofp_action *ofp_actions; @@ -3257,7 +3411,7 @@ update_port_config(struct ofproto *p, struct ofport *port, static int handle_port_mod(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); const struct ofp_port_mod *opm = (const struct ofp_port_mod *) oh; struct ofport *port; int error; @@ -3312,7 +3466,7 @@ append_ofp_stats_reply(size_t nbytes, struct ofconn *ofconn, struct ofp_stats_reply *reply = msg->data; reply->flags = htons(OFPSF_REPLY_MORE); *msgp = make_ofp_stats_reply(reply->header.xid, reply->type, nbytes); - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); } return ofpbuf_put_uninit(*msgp, nbytes); } @@ -3348,7 +3502,7 @@ append_nxstats_reply(size_t nbytes, struct ofconn *ofconn, struct nicira_stats_msg *reply = msg->data; reply->flags = htons(OFPSF_REPLY_MORE); *msgp = make_nxstats_reply(reply->header.xid, reply->subtype, nbytes); - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); } ofpbuf_prealloc_tailroom(*msgp, nbytes); } @@ -3357,7 +3511,7 @@ static int handle_desc_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct 
ofp_desc_stats *ods; struct ofpbuf *msg; @@ -3369,7 +3523,7 @@ handle_desc_stats_request(struct ofconn *ofconn, ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc); ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num); ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc); - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); return 0; } @@ -3378,7 +3532,7 @@ static int handle_table_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofp_table_stats *ots; struct ofpbuf *msg; @@ -3388,14 +3542,14 @@ handle_table_stats_request(struct ofconn *ofconn, ots = append_ofp_stats_reply(sizeof *ots, ofconn, &msg); memset(ots, 0, sizeof *ots); strcpy(ots->name, "classifier"); - ots->wildcards = (ofconn->flow_format == NXFF_OPENFLOW10 + ots->wildcards = (ofconn_get_flow_format(ofconn) == NXFF_OPENFLOW10 ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL)); ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. */ ots->active_count = htonl(classifier_count(&p->cls)); - ots->lookup_count = htonll(0); /* XXX */ - ots->matched_count = htonll(0); /* XXX */ + put_32aligned_be64(&ots->lookup_count, htonll(0)); /* XXX */ + put_32aligned_be64(&ots->matched_count, htonll(0)); /* XXX */ - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); return 0; } @@ -3414,24 +3568,24 @@ append_port_stat(struct ofport *port, struct ofconn *ofconn, ops = append_ofp_stats_reply(sizeof *ops, ofconn, msgp); ops->port_no = htons(port->opp.port_no); memset(ops->pad, 0, sizeof ops->pad); - ops->rx_packets = htonll(stats.rx_packets); - ops->tx_packets = htonll(stats.tx_packets); - ops->rx_bytes = htonll(stats.rx_bytes); - ops->tx_bytes = htonll(stats.tx_bytes); - ops->rx_dropped = htonll(stats.rx_dropped); - ops->tx_dropped = htonll(stats.tx_dropped); - ops->rx_errors = htonll(stats.rx_errors); - ops->tx_errors = htonll(stats.tx_errors); - ops->rx_frame_err = htonll(stats.rx_frame_errors); - ops->rx_over_err = htonll(stats.rx_over_errors); - ops->rx_crc_err = htonll(stats.rx_crc_errors); - ops->collisions = htonll(stats.collisions); + put_32aligned_be64(&ops->rx_packets, htonll(stats.rx_packets)); + put_32aligned_be64(&ops->tx_packets, htonll(stats.tx_packets)); + put_32aligned_be64(&ops->rx_bytes, htonll(stats.rx_bytes)); + put_32aligned_be64(&ops->tx_bytes, htonll(stats.tx_bytes)); + put_32aligned_be64(&ops->rx_dropped, htonll(stats.rx_dropped)); + put_32aligned_be64(&ops->tx_dropped, htonll(stats.tx_dropped)); + put_32aligned_be64(&ops->rx_errors, htonll(stats.rx_errors)); + put_32aligned_be64(&ops->tx_errors, htonll(stats.tx_errors)); + put_32aligned_be64(&ops->rx_frame_err, htonll(stats.rx_frame_errors)); + put_32aligned_be64(&ops->rx_over_err, htonll(stats.rx_over_errors)); + put_32aligned_be64(&ops->rx_crc_err, htonll(stats.rx_crc_errors)); + put_32aligned_be64(&ops->collisions, htonll(stats.collisions)); } static int handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); const struct ofp_port_stats_request *psr = ofputil_stats_body(oh); struct ofp_port_stats *ops; struct ofpbuf *msg; @@ -3449,56 +3603,26 @@ handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) } } - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); return 0; } -/* Obtains statistic counters for 
'rule' within 'p' and stores them into - * '*packet_countp' and '*byte_countp'. The returned statistics include - * statistics for all of 'rule''s facets. */ static void -query_stats(struct ofproto *p, struct rule *rule, - uint64_t *packet_countp, uint64_t *byte_countp) +calc_flow_duration__(long long int start, uint32_t *sec, uint32_t *nsec) { - uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; - uint64_t packet_count, byte_count; - struct facet *facet; - struct ofpbuf key; - - /* Start from historical data for 'rule' itself that are no longer tracked - * by the datapath. This counts, for example, facets that have expired. */ - packet_count = rule->packet_count; - byte_count = rule->byte_count; - - /* Ask the datapath for statistics on all of the rule's facets. - * - * Also, add any statistics that are not tracked by the datapath for each - * facet. This includes, for example, statistics for packets that were - * executed "by hand" by ofproto via dpif_execute() but must be accounted - * to a rule. */ - ofpbuf_use_stack(&key, keybuf, sizeof keybuf); - LIST_FOR_EACH (facet, list_node, &rule->facets) { - struct dpif_flow_stats stats; - - ofpbuf_clear(&key); - odp_flow_key_from_flow(&key, &facet->flow); - dpif_flow_get(p->dpif, 0, key.data, key.size, NULL, &stats); - - packet_count += stats.n_packets + facet->packet_count; - byte_count += stats.n_bytes + facet->byte_count; - } - - /* Return the stats to the caller. */ - *packet_countp = packet_count; - *byte_countp = byte_count; + long long int msecs = time_msec() - start; + *sec = msecs / 1000; + *nsec = (msecs % 1000) * (1000 * 1000); } static void -calc_flow_duration(long long int start, ovs_be32 *sec, ovs_be32 *nsec) +calc_flow_duration(long long int start, ovs_be32 *sec_be, ovs_be32 *nsec_be) { - long long int msecs = time_msec() - start; - *sec = htonl(msecs / 1000); - *nsec = htonl((msecs % 1000) * (1000 * 1000)); + uint32_t sec, nsec; + + calc_flow_duration__(start, &sec, &nsec); + *sec_be = htonl(sec); + *nsec_be = htonl(nsec); } static void @@ -3507,6 +3631,7 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, { struct ofp_flow_stats *ofs; uint64_t packet_count, byte_count; + ovs_be64 cookie; size_t act_len, len; if (rule_is_hidden(rule) || !rule_has_out_port(rule, out_port)) { @@ -3516,21 +3641,22 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, act_len = sizeof *rule->actions * rule->n_actions; len = offsetof(struct ofp_flow_stats, actions) + act_len; - query_stats(ofconn->ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); ofs = append_ofp_stats_reply(len, ofconn, replyp); ofs->length = htons(len); ofs->table_id = 0; ofs->pad = 0; - ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofs->match, - rule->flow_cookie, &ofs->cookie); + ofputil_cls_rule_to_match(&rule->cr, ofconn_get_flow_format(ofconn), + &ofs->match, rule->flow_cookie, &cookie); + put_32aligned_be64(&ofs->cookie, cookie); calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec); ofs->priority = htons(rule->cr.priority); ofs->idle_timeout = htons(rule->idle_timeout); ofs->hard_timeout = htons(rule->hard_timeout); memset(ofs->pad2, 0, sizeof ofs->pad2); - ofs->packet_count = htonll(packet_count); - ofs->byte_count = htonll(byte_count); + put_32aligned_be64(&ofs->packet_count, htonll(packet_count)); + put_32aligned_be64(&ofs->byte_count, htonll(byte_count)); if (rule->n_actions > 0) { memcpy(ofs->actions, rule->actions, act_len); } @@ -3539,13 +3665,23 @@ put_ofp_flow_stats(struct 
ofconn *ofconn, struct rule *rule, static bool is_valid_table(uint8_t table_id) { - return table_id == 0 || table_id == 0xff; + if (table_id == 0 || table_id == 0xff) { + return true; + } else { + /* It would probably be better to reply with an error but there doesn't + * seem to be any appropriate value, so that might just be + * confusing. */ + VLOG_WARN_RL(&rl, "controller asked for invalid table %"PRIu8, + table_id); + return false; + } } static int handle_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) { const struct ofp_flow_stats_request *fsr = ofputil_stats_body(oh); + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofpbuf *reply; COVERAGE_INC(ofproto_flows_req); @@ -3557,12 +3693,12 @@ handle_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) ofputil_cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0, &target); - cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target); + cls_cursor_init(&cursor, &ofproto->cls, &target); CLS_CURSOR_FOR_EACH (rule, cr, &cursor) { put_ofp_flow_stats(ofconn, rule, fsr->out_port, &reply); } } - queue_tx(reply, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, reply); return 0; } @@ -3580,7 +3716,7 @@ put_nx_flow_stats(struct ofconn *ofconn, struct rule *rule, return; } - query_stats(ofconn->ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); act_len = sizeof *rule->actions * rule->n_actions; @@ -3609,6 +3745,7 @@ put_nx_flow_stats(struct ofconn *ofconn, struct rule *rule, static int handle_nxst_flow(struct ofconn *ofconn, const struct ofp_header *oh) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct nx_flow_stats_request *nfsr; struct cls_rule target; struct ofpbuf *reply; @@ -3633,30 +3770,32 @@ handle_nxst_flow(struct ofconn *ofconn, const struct ofp_header *oh) struct cls_cursor cursor; struct rule *rule; - cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target); + cls_cursor_init(&cursor, &ofproto->cls, &target); CLS_CURSOR_FOR_EACH (rule, cr, &cursor) { put_nx_flow_stats(ofconn, rule, nfsr->out_port, &reply); } } - queue_tx(reply, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, reply); return 0; } static void -flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results) +flow_stats_ds(struct rule *rule, struct ds *results) { uint64_t packet_count, byte_count; size_t act_len = sizeof *rule->actions * rule->n_actions; - query_stats(ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); ds_put_format(results, "duration=%llds, ", (time_msec() - rule->created) / 1000); + ds_put_format(results, "idle=%.3fs, ", (time_msec() - rule->used) / 1000.0); ds_put_format(results, "priority=%u, ", rule->cr.priority); ds_put_format(results, "n_packets=%"PRIu64", ", packet_count); ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count); cls_rule_format(&rule->cr, results); + ds_put_char(results, ','); if (act_len > 0) { ofp_print_actions(results, &rule->actions->header, act_len); } else { @@ -3666,7 +3805,7 @@ flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results) } /* Adds a pretty-printed description of all flows to 'results', including - * those marked hidden by secchan (e.g., by in-band control). */ + * hidden flows (e.g., set up by in-band control). 
*/ void ofproto_get_all_flows(struct ofproto *p, struct ds *results) { @@ -3675,7 +3814,7 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results) cls_cursor_init(&cursor, &p->cls, NULL); CLS_CURSOR_FOR_EACH (rule, cr, &cursor) { - flow_stats_ds(p, rule, results); + flow_stats_ds(rule, results); } } @@ -3700,7 +3839,7 @@ query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target, uint64_t packet_count; uint64_t byte_count; - query_stats(ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); total_packets += packet_count; total_bytes += byte_count; @@ -3710,8 +3849,8 @@ query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target, } oasr->flow_count = htonl(n_flows); - oasr->packet_count = htonll(total_packets); - oasr->byte_count = htonll(total_bytes); + put_32aligned_be64(&oasr->packet_count, htonll(total_packets)); + put_32aligned_be64(&oasr->byte_count, htonll(total_bytes)); memset(oasr->pad, 0, sizeof oasr->pad); } @@ -3720,6 +3859,7 @@ handle_aggregate_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) { const struct ofp_aggregate_stats_request *request = ofputil_stats_body(oh); + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofp_aggregate_stats_reply *reply; struct cls_rule target; struct ofpbuf *msg; @@ -3729,15 +3869,16 @@ handle_aggregate_stats_request(struct ofconn *ofconn, msg = start_ofp_stats_reply(oh, sizeof *reply); reply = append_ofp_stats_reply(sizeof *reply, ofconn, &msg); - query_aggregate_stats(ofconn->ofproto, &target, request->out_port, + query_aggregate_stats(ofproto, &target, request->out_port, request->table_id, reply); - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); return 0; } static int handle_nxst_aggregate(struct ofconn *ofconn, const struct ofp_header *oh) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct nx_aggregate_stats_request *request; struct ofp_aggregate_stats_reply *reply; struct cls_rule target; @@ -3761,9 +3902,9 @@ handle_nxst_aggregate(struct ofconn *ofconn, const struct ofp_header *oh) COVERAGE_INC(ofproto_flows_req); buf = start_nxstats_reply(&request->nsm, sizeof *reply); reply = ofpbuf_put_uninit(buf, sizeof *reply); - query_aggregate_stats(ofconn->ofproto, &target, request->out_port, + query_aggregate_stats(ofproto, &target, request->out_port, request->table_id, reply); - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); return 0; } @@ -3784,9 +3925,9 @@ put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id, reply->port_no = htons(cbdata->ofport->opp.port_no); memset(reply->pad, 0, sizeof reply->pad); reply->queue_id = htonl(queue_id); - reply->tx_bytes = htonll(stats->tx_bytes); - reply->tx_packets = htonll(stats->tx_packets); - reply->tx_errors = htonll(stats->tx_errors); + put_32aligned_be64(&reply->tx_bytes, htonll(stats->tx_bytes)); + put_32aligned_be64(&reply->tx_packets, htonll(stats->tx_packets)); + put_32aligned_be64(&reply->tx_errors, htonll(stats->tx_errors)); } static void @@ -3819,7 +3960,7 @@ handle_queue_stats_for_port(struct ofport *port, uint32_t queue_id, static int handle_queue_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *ofproto = ofconn->ofproto; + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); const struct ofp_queue_stats_request *qsr; struct queue_stats_cbdata cbdata; struct ofport *port; @@ -3851,16 +3992,17 @@ handle_queue_stats_request(struct ofconn *ofconn, const struct 
ofp_header *oh) ofpbuf_delete(cbdata.msg); return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT); } - queue_tx(cbdata.msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, cbdata.msg); return 0; } +/* Updates 'facet''s used time. Caller is responsible for calling + * facet_push_stats() to update the flows which 'facet' resubmits into. */ static void facet_update_time(struct ofproto *ofproto, struct facet *facet, - const struct dpif_flow_stats *stats) + long long int used) { - long long int used = stats->used; if (used > facet->used) { facet->used = used; if (used > facet->rule->used) { @@ -3880,19 +4022,79 @@ static void facet_update_stats(struct ofproto *ofproto, struct facet *facet, const struct dpif_flow_stats *stats) { - if (stats->n_packets) { - facet_update_time(ofproto, facet, stats); + if (stats->n_packets || stats->used > facet->used) { + facet_update_time(ofproto, facet, stats->used); facet->packet_count += stats->n_packets; facet->byte_count += stats->n_bytes; + facet_push_stats(ofproto, facet); netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags); } } +static void +facet_push_stats(struct ofproto *ofproto, struct facet *facet) +{ + uint64_t rs_packets, rs_bytes; + + assert(facet->packet_count >= facet->rs_packet_count); + assert(facet->byte_count >= facet->rs_byte_count); + assert(facet->used >= facet->rs_used); + + rs_packets = facet->packet_count - facet->rs_packet_count; + rs_bytes = facet->byte_count - facet->rs_byte_count; + + if (rs_packets || rs_bytes || facet->used > facet->rs_used) { + facet->rs_packet_count = facet->packet_count; + facet->rs_byte_count = facet->byte_count; + facet->rs_used = facet->used; + + flow_push_stats(ofproto, facet->rule, &facet->flow, + rs_packets, rs_bytes, facet->used); + } +} + +struct ofproto_push { + struct action_xlate_ctx ctx; + uint64_t packets; + uint64_t bytes; + long long int used; +}; + +static void +push_resubmit(struct action_xlate_ctx *ctx, struct rule *rule) +{ + struct ofproto_push *push = CONTAINER_OF(ctx, struct ofproto_push, ctx); + + if (rule) { + rule->packet_count += push->packets; + rule->byte_count += push->bytes; + rule->used = MAX(push->used, rule->used); + } +} + +/* Pushes flow statistics to the rules which 'flow' resubmits into given + * 'rule''s actions. */ +static void +flow_push_stats(struct ofproto *ofproto, const struct rule *rule, + struct flow *flow, uint64_t packets, uint64_t bytes, + long long int used) +{ + struct ofproto_push push; + + push.packets = packets; + push.bytes = bytes; + push.used = used; + + action_xlate_ctx_init(&push.ctx, ofproto, flow, NULL); + push.ctx.resubmit_hook = push_resubmit; + ofpbuf_delete(xlate_actions(&push.ctx, rule->actions, rule->n_actions)); +} + /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT * in which no matching flow already exists in the flow table. * * Adds the flow specified by 'ofm', which is followed by 'n_actions' - * ofp_actions, to ofconn->ofproto's flow table. Returns 0 on success or an + * ofp_actions, to the ofproto's flow table. Returns 0 on success or an * OpenFlow error code as encoded by ofp_mkerr() on failure. 
* * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id, @@ -3900,7 +4102,7 @@ facet_update_stats(struct ofproto *ofproto, struct facet *facet, static int add_flow(struct ofconn *ofconn, struct flow_mod *fm) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofpbuf *packet; struct rule *rule; uint16_t in_port; @@ -3940,6 +4142,7 @@ static int send_buffered_packet(struct ofconn *ofconn, struct rule *rule, uint32_t buffer_id) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofpbuf *packet; uint16_t in_port; int error; @@ -3953,7 +4156,7 @@ send_buffered_packet(struct ofconn *ofconn, return error; } - rule_execute(ofconn->ofproto, rule, in_port, packet); + rule_execute(ofproto, rule, in_port, packet); return 0; } @@ -3977,7 +4180,7 @@ static int modify_flow(struct ofproto *, const struct flow_mod *, static int modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct rule *match = NULL; struct cls_cursor cursor; struct rule *rule; @@ -4009,7 +4212,7 @@ modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm) static int modify_flow_strict(struct ofconn *ofconn, struct flow_mod *fm) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct rule *rule = find_flow_strict(p, fm); if (rule && !rule_is_hidden(rule)) { modify_flow(p, fm, rule); @@ -4100,7 +4303,7 @@ delete_flow(struct ofproto *p, struct rule *rule, ovs_be16 out_port) static int handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct flow_mod fm; int error; @@ -4109,7 +4312,7 @@ handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh) return error; } - error = ofputil_decode_flow_mod(&fm, oh, ofconn->flow_format); + error = ofputil_decode_flow_mod(&fm, oh, ofconn_get_flow_format(ofconn)); if (error) { return error; } @@ -4156,8 +4359,11 @@ handle_tun_id_from_cookie(struct ofconn *ofconn, const struct ofp_header *oh) { const struct nxt_tun_id_cookie *msg = (const struct nxt_tun_id_cookie *) oh; + enum nx_flow_format flow_format; + + flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10; + ofconn_set_flow_format(ofconn, flow_format); - ofconn->flow_format = msg->set ? 
NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10; return 0; } @@ -4169,7 +4375,7 @@ handle_role_request(struct ofconn *ofconn, const struct ofp_header *oh) struct ofpbuf *buf; uint32_t role; - if (ofconn->type != OFCONN_PRIMARY) { + if (ofconn_get_type(ofconn) != OFCONN_PRIMARY) { VLOG_WARN_RL(&rl, "ignoring role request on non-controller " "connection"); return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM); @@ -4185,19 +4391,20 @@ handle_role_request(struct ofconn *ofconn, const struct ofp_header *oh) } if (role == NX_ROLE_MASTER) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofconn *other; - HMAP_FOR_EACH (other, hmap_node, &ofconn->ofproto->controllers) { - if (other->role == NX_ROLE_MASTER) { - other->role = NX_ROLE_SLAVE; + HMAP_FOR_EACH (other, hmap_node, &ofproto->controllers) { + if (ofconn_get_role(other) == NX_ROLE_MASTER) { + ofconn_set_role(other, NX_ROLE_SLAVE); } } } - ofconn->role = role; + ofconn_set_role(ofconn, role); reply = make_nxmsg_xid(sizeof *reply, NXT_ROLE_REPLY, oh->xid, &buf); reply->role = htonl(role); - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); return 0; } @@ -4213,7 +4420,7 @@ handle_nxt_set_flow_format(struct ofconn *ofconn, const struct ofp_header *oh) if (format == NXFF_OPENFLOW10 || format == NXFF_TUN_ID_FROM_COOKIE || format == NXFF_NXM) { - ofconn->flow_format = format; + ofconn_set_flow_format(ofconn, format); return 0; } else { return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM); @@ -4229,7 +4436,7 @@ handle_barrier_request(struct ofconn *ofconn, const struct ofp_header *oh) /* Currently, everything executes synchronously, so we can just * immediately send the barrier reply. */ ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf); - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); return 0; } @@ -4276,10 +4483,6 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) return 0; /* Nicira extension requests. */ - case OFPUTIL_NXT_STATUS_REQUEST: - return switch_status_handle_request( - ofconn->ofproto->switch_status, ofconn->rconn, oh); - case OFPUTIL_NXT_TUN_ID_FROM_COOKIE: return handle_tun_id_from_cookie(ofconn, oh); @@ -4335,7 +4538,6 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) case OFPUTIL_OFPST_PORT_REPLY: case OFPUTIL_OFPST_TABLE_REPLY: case OFPUTIL_OFPST_AGGREGATE_REPLY: - case OFPUTIL_NXT_STATUS_REPLY: case OFPUTIL_NXT_ROLE_REPLY: case OFPUTIL_NXT_FLOW_REMOVED: case OFPUTIL_NXST_FLOW_REPLY: @@ -4376,16 +4578,20 @@ handle_miss_upcall(struct ofproto *p, struct dpif_upcall *upcall) /* Set header pointers in 'flow'. */ flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow); + if (cfm_should_process_flow(&flow)) { + ofproto_process_cfm(p, &flow, upcall->packet); + ofpbuf_delete(upcall->packet); + return; + } else if (p->ofhooks->special_cb + && !p->ofhooks->special_cb(&flow, upcall->packet, p->aux)) { + ofpbuf_delete(upcall->packet); + return; + } + /* Check with in-band control to see if this packet should be sent * to the local port regardless of the flow table. 
*/ if (in_band_msg_in_hook(p->in_band, &flow, upcall->packet)) { - struct ofpbuf odp_actions; - - ofpbuf_init(&odp_actions, 32); - nl_msg_put_u32(&odp_actions, ODPAT_OUTPUT, ODPP_LOCAL); - dpif_execute(p->dpif, odp_actions.data, odp_actions.size, - upcall->packet); - ofpbuf_uninit(&odp_actions); + ofproto_send_packet(p, ODPP_LOCAL, 0, upcall->packet); } facet = facet_lookup_valid(p, &flow); @@ -4442,13 +4648,13 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) struct flow flow; switch (upcall->type) { - case _ODPL_ACTION_NR: + case DPIF_UC_ACTION: COVERAGE_INC(ofproto_ctlr_action); odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); send_packet_in(p, upcall, &flow, false); break; - case _ODPL_SFLOW_NR: + case DPIF_UC_SAMPLE: if (p->sflow) { odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); ofproto_sflow_received(p->sflow, upcall, &flow); @@ -4456,10 +4662,11 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) ofpbuf_delete(upcall->packet); break; - case _ODPL_MISS_NR: + case DPIF_UC_MISS: handle_miss_upcall(p, upcall); break; + case DPIF_N_UC_TYPES: default: VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type); break; @@ -4469,7 +4676,7 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) /* Flow expiration. */ static int ofproto_dp_max_idle(const struct ofproto *); -static void ofproto_update_used(struct ofproto *); +static void ofproto_update_stats(struct ofproto *); static void rule_expire(struct ofproto *, struct rule *); static void ofproto_expire_facets(struct ofproto *, int dp_max_idle); @@ -4486,8 +4693,8 @@ ofproto_expire(struct ofproto *ofproto) struct cls_cursor cursor; int dp_max_idle; - /* Update 'used' for each flow in the datapath. */ - ofproto_update_used(ofproto); + /* Update stats for each flow in the datapath. */ + ofproto_update_stats(ofproto); /* Expire facets that have been idle too long. */ dp_max_idle = ofproto_dp_max_idle(ofproto); @@ -4510,9 +4717,19 @@ ofproto_expire(struct ofproto *ofproto) return MIN(dp_max_idle, 1000); } -/* Update 'used' member of installed facets. */ +/* Update 'packet_count', 'byte_count', and 'used' members of installed facets. + * + * This function also pushes statistics updates to rules which each facet + * resubmits into. Generally these statistics will be accurate. However, if a + * facet changes the rule it resubmits into at some time in between + * ofproto_update_stats() runs, it is possible that statistics accrued to the + * old rule will be incorrectly attributed to the new rule. This could be + * avoided by calling ofproto_update_stats() whenever rules are created or + * deleted. However, the performance impact of making so many calls to the + * datapath do not justify the benefit of having perfectly accurate statistics. 
+ */ static void -ofproto_update_used(struct ofproto *p) +ofproto_update_stats(struct ofproto *p) { const struct dpif_flow_stats *stats; struct dpif_flow_dump dump; @@ -4538,8 +4755,25 @@ ofproto_update_used(struct ofproto *p) facet = facet_find(p, &flow); if (facet && facet->installed) { - facet_update_time(p, facet, stats); + + if (stats->n_packets >= facet->dp_packet_count) { + facet->packet_count += stats->n_packets - facet->dp_packet_count; + } else { + VLOG_WARN_RL(&rl, "unexpected packet count from the datapath"); + } + + if (stats->n_bytes >= facet->dp_byte_count) { + facet->byte_count += stats->n_bytes - facet->dp_byte_count; + } else { + VLOG_WARN_RL(&rl, "unexpected byte count from datapath"); + } + + facet->dp_packet_count = stats->n_packets; + facet->dp_byte_count = stats->n_bytes; + + facet_update_time(p, facet, stats->used); facet_account(p, facet, stats->n_bytes); + facet_push_stats(p, facet); } else { /* There's a flow in the datapath that we know nothing about. * Delete it. */ @@ -4582,7 +4816,7 @@ ofproto_dp_max_idle(const struct ofproto *ofproto) * they receive additional data). * * This requires a second pass through the facets, in addition to the pass - * made by ofproto_update_used(), because the former function never looks + * made by ofproto_update_stats(), because the former function never looks * at uninstallable facets. */ enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) }; @@ -4647,36 +4881,18 @@ facet_active_timeout(struct ofproto *ofproto, struct facet *facet) netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) { struct ofexpired expired; - expired.flow = facet->flow; - expired.packet_count = facet->packet_count; - expired.byte_count = facet->byte_count; - expired.used = facet->used; - - /* Get updated flow stats. - * - * XXX We could avoid this call entirely if (1) ofproto_update_used() - * updated TCP flags and (2) the dpif_flow_list_all() in - * ofproto_update_used() zeroed TCP flags. 
*/ if (facet->installed) { - uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; struct dpif_flow_stats stats; - struct ofpbuf key; - - ofpbuf_use_stack(&key, keybuf, sizeof keybuf); - odp_flow_key_from_flow(&key, &facet->flow); - - if (!dpif_flow_get(ofproto->dpif, ODPFF_ZERO_TCP_FLAGS, - key.data, key.size, NULL, &stats)) { - expired.packet_count += stats.n_packets; - expired.byte_count += stats.n_bytes; - if (stats.n_packets) { - facet_update_time(ofproto, facet, &stats); - netflow_flow_update_flags(&facet->nf_flow, - stats.tcp_flags); - } - } + + facet_put__(ofproto, facet, facet->actions, facet->actions_len, + &stats); + facet_update_stats(ofproto, facet, &stats); } + expired.flow = facet->flow; + expired.packet_count = facet->packet_count; + expired.byte_count = facet->byte_count; + expired.used = facet->used; netflow_expire(ofproto->netflow, &facet->nf_flow, &expired); } } @@ -4731,58 +4947,24 @@ rule_expire(struct ofproto *ofproto, struct rule *rule) rule_remove(ofproto, rule); } -static struct ofpbuf * -compose_ofp_flow_removed(struct ofconn *ofconn, const struct rule *rule, - uint8_t reason) -{ - struct ofp_flow_removed *ofr; - struct ofpbuf *buf; - - ofr = make_openflow_xid(sizeof *ofr, OFPT_FLOW_REMOVED, htonl(0), &buf); - ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofr->match, - rule->flow_cookie, &ofr->cookie); - ofr->priority = htons(rule->cr.priority); - ofr->reason = reason; - calc_flow_duration(rule->created, &ofr->duration_sec, &ofr->duration_nsec); - ofr->idle_timeout = htons(rule->idle_timeout); - ofr->packet_count = htonll(rule->packet_count); - ofr->byte_count = htonll(rule->byte_count); - - return buf; -} - -static struct ofpbuf * -compose_nx_flow_removed(const struct rule *rule, uint8_t reason) -{ - struct nx_flow_removed *nfr; - struct ofpbuf *buf; - int match_len; - - make_nxmsg_xid(sizeof *nfr, NXT_FLOW_REMOVED, htonl(0), &buf); - match_len = nx_put_match(buf, &rule->cr); - - nfr = buf->data; - nfr->cookie = rule->flow_cookie; - nfr->priority = htons(rule->cr.priority); - nfr->reason = reason; - calc_flow_duration(rule->created, &nfr->duration_sec, &nfr->duration_nsec); - nfr->idle_timeout = htons(rule->idle_timeout); - nfr->match_len = htons(match_len); - nfr->packet_count = htonll(rule->packet_count); - nfr->byte_count = htonll(rule->byte_count); - - return buf; -} - static void rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason) { + struct ofputil_flow_removed fr; struct ofconn *ofconn; if (!rule->send_flow_removed) { return; } + fr.rule = rule->cr; + fr.cookie = rule->flow_cookie; + fr.reason = reason; + calc_flow_duration__(rule->created, &fr.duration_sec, &fr.duration_nsec); + fr.idle_timeout = rule->idle_timeout; + fr.packet_count = rule->packet_count; + fr.byte_count = rule->byte_count; + LIST_FOR_EACH (ofconn, node, &p->all_conns) { struct ofpbuf *msg; @@ -4791,17 +4973,39 @@ rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason) continue; } - msg = (ofconn->flow_format == NXFF_NXM - ? compose_nx_flow_removed(rule, reason) - : compose_ofp_flow_removed(ofconn, rule, reason)); + /* This accounts flow expirations as if they were replies to OpenFlow + * requests. That works because preventing OpenFlow requests from + * being processed also prevents new flows from being added (and + * expiring). (It also prevents processing OpenFlow requests that + * would not add new flows, so it is imperfect.) 
*/ + msg = ofputil_encode_flow_removed(&fr, ofconn_get_flow_format(ofconn)); + ofconn_send_reply(ofconn, msg); + } +} - /* Account flow expirations under ofconn->reply_counter, the counter - * for replies to OpenFlow requests. That works because preventing - * OpenFlow requests from being processed also prevents new flows from - * being added (and expiring). (It also prevents processing OpenFlow - * requests that would not add new flows, so it is imperfect.) */ - queue_tx(msg, ofconn, ofconn->reply_counter); +/* Obtains statistics for 'rule' and stores them in '*packets' and '*bytes'. + * The returned statistics include statistics for all of 'rule''s facets. */ +static void +rule_get_stats(const struct rule *rule, uint64_t *packets, uint64_t *bytes) +{ + uint64_t p, b; + struct facet *facet; + + /* Start from historical data for 'rule' itself that are no longer tracked + * in facets. This counts, for example, facets that have expired. */ + p = rule->packet_count; + b = rule->byte_count; + + /* Add any statistics that are tracked by facets. This includes + * statistical data recently updated by ofproto_update_stats() as well as + * stats for packets that were executed "by hand" via dpif_execute(). */ + LIST_FOR_EACH (facet, list_node, &rule->facets) { + p += facet->packet_count; + b += facet->byte_count; } + + *packets = p; + *bytes = b; } /* pinsched callback for sending 'ofp_packet_in' on 'ofconn'. */ @@ -4824,64 +5028,50 @@ static void schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, const struct flow *flow, bool clone) { - enum { OPI_SIZE = offsetof(struct ofp_packet_in, data) }; - struct ofproto *ofproto = ofconn->ofproto; - struct ofp_packet_in *opi; - int total_len, send_len; - struct ofpbuf *packet; - uint32_t buffer_id; + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); + struct ofputil_packet_in pin; + struct ofpbuf *msg; + + /* Figure out the easy parts. */ + pin.packet = upcall->packet; + pin.in_port = odp_port_to_ofp_port(flow->in_port); + pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION; /* Get OpenFlow buffer_id. */ - if (upcall->type == _ODPL_ACTION_NR) { - buffer_id = UINT32_MAX; + if (upcall->type == DPIF_UC_ACTION) { + pin.buffer_id = UINT32_MAX; } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) { - buffer_id = pktbuf_get_null(); + pin.buffer_id = pktbuf_get_null(); } else if (!ofconn->pktbuf) { - buffer_id = UINT32_MAX; + pin.buffer_id = UINT32_MAX; } else { - buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet, flow->in_port); + pin.buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet, + flow->in_port); } /* Figure out how much of the packet to send. */ - total_len = send_len = upcall->packet->size; - if (buffer_id != UINT32_MAX) { - send_len = MIN(send_len, ofconn->miss_send_len); - } - if (upcall->type == _ODPL_ACTION_NR) { - send_len = MIN(send_len, upcall->userdata); + pin.send_len = upcall->packet->size; + if (pin.buffer_id != UINT32_MAX) { + pin.send_len = MIN(pin.send_len, ofconn->miss_send_len); } - - /* Copy or steal buffer for OFPT_PACKET_IN. */ - if (clone) { - packet = ofpbuf_clone_data_with_headroom(upcall->packet->data, - send_len, OPI_SIZE); - } else { - packet = upcall->packet; - packet->size = send_len; + if (upcall->type == DPIF_UC_ACTION) { + pin.send_len = MIN(pin.send_len, upcall->userdata); } - /* Add OFPT_PACKET_IN. 
*/ - opi = ofpbuf_push_zeros(packet, OPI_SIZE); - opi->header.version = OFP_VERSION; - opi->header.type = OFPT_PACKET_IN; - opi->total_len = htons(total_len); - opi->in_port = htons(odp_port_to_ofp_port(flow->in_port)); - opi->reason = upcall->type == _ODPL_MISS_NR ? OFPR_NO_MATCH : OFPR_ACTION; - opi->buffer_id = htonl(buffer_id); - update_openflow_length(packet); - - /* Hand over to packet scheduler. It might immediately call into - * do_send_packet_in() or it might buffer it for a while (until a later - * call to pinsched_run()). */ - pinsched_send(ofconn->schedulers[opi->reason], flow->in_port, - packet, do_send_packet_in, ofconn); + /* Make OFPT_PACKET_IN and hand over to packet scheduler. It might + * immediately call into do_send_packet_in() or it might buffer it for a + * while (until a later call to pinsched_run()). */ + msg = ofputil_encode_packet_in(&pin, clone ? NULL : upcall->packet); + pinsched_send(ofconn->schedulers[upcall->type == DPIF_UC_MISS ? 0 : 1], + flow->in_port, msg, do_send_packet_in, ofconn); } -/* Given 'upcall', of type _ODPL_ACTION_NR or _ODPL_MISS_NR, sends an +/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to * their individual configurations. * - * Takes ownership of 'packet'. */ + * If 'clone' is true, the caller retains ownership of 'upcall->packet'. + * Otherwise, ownership is transferred to this function. */ static void send_packet_in(struct ofproto *ofproto, struct dpif_upcall *upcall, const struct flow *flow, bool clone) @@ -4990,7 +5180,7 @@ trace_format_flow(struct ds *result, int level, const char *title, } static void -trace_resubmit(struct action_xlate_ctx *ctx, const struct rule *rule) +trace_resubmit(struct action_xlate_ctx *ctx, struct rule *rule) { struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx); struct ds *result = trace->result; @@ -5035,7 +5225,7 @@ ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_, goto exit; } - tun_id = htonll(strtoull(tun_id_s, NULL, 10)); + tun_id = htonll(strtoull(tun_id_s, NULL, 0)); in_port = ofp_port_to_odp_port(atoi(in_port_s)); packet_s = ofpbuf_put_hex(&packet, packet_s, NULL); @@ -5106,7 +5296,7 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, uint16_t *nf_output_iface, void *ofproto_) { struct ofproto *ofproto = ofproto_; - int out_port; + struct mac_entry *dst_mac; /* Drop frames for reserved multicast addresses. */ if (eth_addr_is_reserved(flow->dl_dst)) { @@ -5114,31 +5304,37 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, } /* Learn source MAC (but don't try to learn from revalidation). */ - if (packet != NULL) { - tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src, - 0, flow->in_port, - GRAT_ARP_LOCK_NONE); - if (rev_tag) { + if (packet != NULL + && mac_learning_may_learn(ofproto->ml, flow->dl_src, 0)) { + struct mac_entry *src_mac; + + src_mac = mac_learning_insert(ofproto->ml, flow->dl_src, 0); + if (mac_entry_is_new(src_mac) || src_mac->port.i != flow->in_port) { /* The log messages here could actually be useful in debugging, * so keep the rate limit relatively high. 
*/ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300); VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16, ETH_ADDR_ARGS(flow->dl_src), flow->in_port); - ofproto_revalidate(ofproto, rev_tag); + + ofproto_revalidate(ofproto, + mac_learning_changed(ofproto->ml, src_mac)); + src_mac->port.i = flow->in_port; } } /* Determine output port. */ - out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags, - NULL); - if (out_port < 0) { + dst_mac = mac_learning_lookup(ofproto->ml, flow->dl_dst, 0, tags); + if (!dst_mac) { flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD, nf_output_iface, odp_actions); - } else if (out_port != flow->in_port) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, out_port); - *nf_output_iface = out_port; } else { - /* Drop. */ + int out_port = dst_mac->port.i; + if (out_port != flow->in_port) { + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, out_port); + *nf_output_iface = out_port; + } else { + /* Drop. */ + } } return true; @@ -5147,5 +5343,6 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, static const struct ofhooks default_ofhooks = { default_normal_ofhook_cb, NULL, + NULL, NULL };
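
Throughout the hunks above, direct accesses to struct ofconn members (ofconn->ofproto, ofconn->flow_format, ofconn->role, ofconn->reply_counter) are replaced by accessors such as ofconn_get_ofproto(), ofconn_get_flow_format(), ofconn_set_role(), and ofconn_send_reply(), so the connection structure can be made private to the connection-management code. A minimal sketch of that opaque-handle idiom; the names below (struct conn, conn_get_role, conn_set_role) are hypothetical stand-ins, not the real ofconn API:

/* conn.h -- callers see only an incomplete type plus accessors. */
struct conn;                          /* Definition hidden from callers. */
int  conn_get_role(const struct conn *);
void conn_set_role(struct conn *, int role);

/* conn.c -- the full definition lives in a single translation unit. */
struct conn {
    int role;
    /* ... members that callers no longer touch directly ... */
};

int
conn_get_role(const struct conn *conn)
{
    return conn->role;
}

void
conn_set_role(struct conn *conn, int role)
{
    conn->role = role;
}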
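
The aggregate-stats and queue-stats hunks switch from plain htonll() assignments to put_32aligned_be64() because the 64-bit counters in those OpenFlow reply structures are guaranteed only 32-bit alignment on the wire. The real helpers live in unaligned.h; the type and function below are simplified stand-ins that show the idea:

#include <stdint.h>
#include <string.h>

/* A 64-bit big-endian field that may be only 32-bit aligned, kept as two
 * 32-bit halves so no 8-byte-aligned store is ever generated for it. */
struct be64_32aligned {
    uint32_t hi;                  /* Most-significant half, network order. */
    uint32_t lo;                  /* Least-significant half, network order. */
};

/* Stores 'value', already converted to network byte order, into '*field'
 * with two 4-byte copies instead of one possibly misaligned 8-byte store. */
static inline void
put_be64_32aligned(struct be64_32aligned *field, uint64_t value)
{
    memcpy(&field->hi, (const char *) &value, sizeof field->hi);
    memcpy(&field->lo, (const char *) &value + 4, sizeof field->lo);
}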
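
flow_push_stats() above credits packet, byte, and used figures to every rule a facet resubmits into by replaying the rule's actions with a resubmit_hook installed; the hook (push_resubmit) recovers its extra state by embedding the translation context in a larger struct and using CONTAINER_OF. A self-contained sketch of that pattern, with stand-in types rather than the ofproto structures:

#include <stddef.h>
#include <stdint.h>

#define CONTAINER_OF(pointer, type, member) \
    ((type *) ((char *) (pointer) - offsetof(type, member)))

struct rule_stub {
    uint64_t packet_count;
    uint64_t byte_count;
    long long int used;
};

struct xlate_ctx_stub {
    /* Called once per rule visited while replaying resubmit actions. */
    void (*resubmit_hook)(struct xlate_ctx_stub *, struct rule_stub *);
};

struct push_stub {
    struct xlate_ctx_stub ctx;    /* Embedded so the hook can find 'push'. */
    uint64_t packets;
    uint64_t bytes;
    long long int used;
};

static void
push_hook(struct xlate_ctx_stub *ctx, struct rule_stub *rule)
{
    struct push_stub *push = CONTAINER_OF(ctx, struct push_stub, ctx);

    if (rule) {
        rule->packet_count += push->packets;
        rule->byte_count += push->bytes;
        if (push->used > rule->used) {
            rule->used = push->used;
        }
    }
}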
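
In handle_miss_upcall() above, an upcall is first offered to CFM processing and then to the bridge's special_cb ofhook; when that hook returns false the packet is freed and no flow is set up. A hypothetical hook with that shape, using stand-in types; the ethertype policy is invented for illustration and only the return-value contract comes from the call site above:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the flow and packet types the real hook receives. */
struct toy_flow { uint16_t eth_type; };          /* Host byte order here. */
struct toy_packet { const void *data; unsigned int size; };

/* Returning false tells the caller to drop the packet and skip flow setup. */
static bool
example_special_cb(const struct toy_flow *flow,
                   const struct toy_packet *packet, void *aux)
{
    (void) packet;
    (void) aux;

    /* Example policy: consume IEEE 802.3 slow-protocols frames
     * (ethertype 0x8809) rather than letting them be forwarded. */
    return flow->eth_type != 0x8809;
}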
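
ofproto_update_stats() above keeps the last packet and byte counts reported by the datapath in dp_packet_count and dp_byte_count and folds only the increase into the facet's totals, logging a warning if a datapath counter ever moves backwards. A reduced sketch of that accumulation with hypothetical names; the plain fprintf() stands in for VLOG_WARN_RL:

#include <stdint.h>
#include <stdio.h>

/* Userspace shadow of one datapath counter. */
struct counter_shadow {
    uint64_t total;               /* Monotonic total kept in userspace. */
    uint64_t last_seen;           /* Value last reported by the datapath. */
};

/* Folds a fresh datapath reading into 'shadow->total'.  Only the increase
 * since the previous reading is credited; a reading that goes backwards is
 * logged and dropped, then the shadow resynchronizes. */
static void
counter_shadow_update(struct counter_shadow *shadow, uint64_t reading)
{
    if (reading >= shadow->last_seen) {
        shadow->total += reading - shadow->last_seen;
    } else {
        fprintf(stderr, "unexpected counter decrease in datapath\n");
    }
    shadow->last_seen = reading;
}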
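
default_normal_ofhook_cb() above now uses mac_learning_insert()/mac_learning_lookup(): learn the source MAC (revalidating flows when the entry is new or has moved ports), then forward to the learned port, flood when the destination is unknown, and drop when the learned port equals the ingress port. A stripped-down sketch of that decision using a toy MAC table; VLANs, aging, and gratuitous-ARP locking, which the real mac-learning module handles, are omitted:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define TOY_TABLE_SIZE 256

struct toy_mac_entry {
    uint8_t  mac[6];
    uint16_t port;
    bool     in_use;
};

enum toy_verdict { TOY_FORWARD, TOY_FLOOD, TOY_DROP };

static struct toy_mac_entry toy_table[TOY_TABLE_SIZE];

static struct toy_mac_entry *
toy_lookup(const uint8_t mac[6])
{
    int i;

    for (i = 0; i < TOY_TABLE_SIZE; i++) {
        if (toy_table[i].in_use && !memcmp(toy_table[i].mac, mac, 6)) {
            return &toy_table[i];
        }
    }
    return NULL;
}

/* Learns that 'src' is reachable on 'in_port'.  Returns true if cached flows
 * should be revalidated because the entry is new or the MAC moved ports. */
static bool
toy_learn(const uint8_t src[6], uint16_t in_port)
{
    struct toy_mac_entry *e = toy_lookup(src);
    int i;

    if (!e) {
        for (i = 0; i < TOY_TABLE_SIZE; i++) {
            if (!toy_table[i].in_use) {
                e = &toy_table[i];
                memcpy(e->mac, src, 6);
                e->port = in_port;
                e->in_use = true;
                return true;                   /* New entry. */
            }
        }
        return false;                          /* Table full; not learned. */
    } else if (e->port != in_port) {
        e->port = in_port;
        return true;                           /* MAC moved to a new port. */
    } else {
        return false;                          /* Nothing changed. */
    }
}

/* Chooses a verdict for a frame from 'in_port' destined to 'dst': forward to
 * the learned port, flood when unknown, or drop when the destination was
 * learned on the ingress port itself. */
static enum toy_verdict
toy_decide(const uint8_t dst[6], uint16_t in_port, uint16_t *out_port)
{
    const struct toy_mac_entry *e = toy_lookup(dst);

    if (!e) {
        return TOY_FLOOD;
    } else if (e->port == in_port) {
        return TOY_DROP;
    } else {
        *out_port = e->port;
        return TOY_FORWARD;
    }
}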