X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=f757b58094019d6aed244014a0c71f4b4be01fe9;hb=1ce0a5fa82f1a0013dd62713d16fde973b029eb7;hp=86d0ae67715c33b41a5ccd84aaa511cb4d776a62;hpb=cdee00fd635d1e0f1eeb5d9c009daeb59abd4777;p=openvswitch diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c index 86d0ae67..f757b580 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010 Nicira Networks. + * Copyright (c) 2009, 2010, 2011 Nicira Networks. * Copyright (c) 2010 Jean Tourrilhes - HP-Labs. * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,9 +25,9 @@ #include #include #include "byte-order.h" +#include "cfm.h" #include "classifier.h" #include "coverage.h" -#include "discovery.h" #include "dpif.h" #include "dynamic-string.h" #include "fail-open.h" @@ -35,6 +35,7 @@ #include "hmap.h" #include "in-band.h" #include "mac-learning.h" +#include "multipath.h" #include "netdev.h" #include "netflow.h" #include "netlink.h" @@ -53,11 +54,11 @@ #include "poll-loop.h" #include "rconn.h" #include "shash.h" -#include "status.h" #include "stream-ssl.h" #include "svec.h" #include "tag.h" #include "timeval.h" +#include "unaligned.h" #include "unixctl.h" #include "vconn.h" #include "vlog.h" @@ -89,7 +90,9 @@ COVERAGE_DEFINE(ofproto_unexpected_rule); COVERAGE_DEFINE(ofproto_uninstallable); COVERAGE_DEFINE(ofproto_update_port); -#include "sflow_api.h" +/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a + * flow translation. */ +#define MAX_RESUBMIT_RECURSION 16 struct rule; @@ -98,10 +101,12 @@ struct ofport { struct netdev *netdev; struct ofp_phy_port opp; /* In host byte order. */ uint16_t odp_port; + struct cfm *cfm; /* Connectivity Fault Management, if any. */ }; static void ofport_free(struct ofport *); -static void hton_ofp_phy_port(struct ofp_phy_port *); +static void ofport_run(struct ofproto *, struct ofport *); +static void ofport_wait(struct ofport *); struct action_xlate_ctx { /* action_xlate_ctx_init() initializes these members. */ @@ -121,7 +126,12 @@ struct action_xlate_ctx { * * This is normally null so the client has to set it manually after * calling action_xlate_ctx_init(). */ - void (*resubmit_hook)(struct action_xlate_ctx *, const struct rule *); + void (*resubmit_hook)(struct action_xlate_ctx *, struct rule *); + + /* If true, the speciality of 'flow' should be checked before executing + * its actions. If special_cb returns false on 'flow' rendered + * uninstallable and no actions will be executed. */ + bool check_special; /* xlate_actions() initializes and uses these members. The client might want * to look at them after it returns. */ @@ -137,7 +147,7 @@ struct action_xlate_ctx { int recurse; /* Recursion level, via xlate_table_action. */ int last_pop_priority; /* Offset in 'odp_actions' just past most - * recently added ODPAT_SET_PRIORITY. */ + * recent ODP_ACTION_ATTR_SET_PRIORITY. */ }; static void action_xlate_ctx_init(struct action_xlate_ctx *, @@ -193,6 +203,8 @@ static void rule_insert(struct ofproto *, struct rule *); static void rule_remove(struct ofproto *, struct rule *); static void rule_send_removed(struct ofproto *, struct rule *, uint8_t reason); +static void rule_get_stats(const struct rule *, uint64_t *packets, + uint64_t *bytes); /* An exact-match instantiation of an OpenFlow flow. */ struct facet { @@ -205,7 +217,8 @@ struct facet { * * - Do include packets and bytes that were obtained from the datapath * when a flow was deleted (e.g. 
dpif_flow_del()) or when its - * statistics were reset (e.g. dpif_flow_put() with ODPPF_ZERO_STATS). + * statistics were reset (e.g. dpif_flow_put() with + * DPIF_FP_ZERO_STATS). * * - Do not include any packets or bytes that can currently be obtained * from the datapath by, e.g., dpif_flow_get(). @@ -213,6 +226,13 @@ struct facet { uint64_t packet_count; /* Number of packets received. */ uint64_t byte_count; /* Number of bytes received. */ + uint64_t dp_packet_count; /* Last known packet count in the datapath. */ + uint64_t dp_byte_count; /* Last known byte count in the datapath. */ + + uint64_t rs_packet_count; /* Packets pushed to resubmit children. */ + uint64_t rs_byte_count; /* Bytes pushed to resubmit children. */ + long long int rs_used; /* Used time pushed to resubmit children. */ + /* Number of bytes passed to account_cb. This may include bytes that can * currently obtained from the datapath (thus, it can be greater than * byte_count). */ @@ -225,7 +245,7 @@ struct facet { bool installed; /* Installed in datapath? */ bool may_install; /* True ordinarily; false if actions must * be reassessed for every packet. */ - unsigned int actions_len; /* Number of bytes in actions[]. */ + size_t actions_len; /* Number of bytes in actions[]. */ struct nlattr *actions; /* Datapath actions. */ tag_type tags; /* Tags (set only by hooks). */ struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */ @@ -247,7 +267,8 @@ static void facet_flush_stats(struct ofproto *, struct facet *); static void facet_make_actions(struct ofproto *, struct facet *, const struct ofpbuf *packet); static void facet_update_stats(struct ofproto *, struct facet *, - const struct odp_flow_stats *); + const struct dpif_flow_stats *); +static void facet_push_stats(struct ofproto *, struct facet *); /* ofproto supports two kinds of OpenFlow connections: * @@ -299,7 +320,8 @@ struct ofconn { /* OFPT_PACKET_IN related data. */ struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */ - struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */ +#define N_SCHEDULERS 2 + struct pinsched *schedulers[N_SCHEDULERS]; struct pktbuf *pktbuf; /* OpenFlow packet buffers. */ int miss_send_len; /* Bytes to send of buffered packets. */ @@ -312,35 +334,39 @@ struct ofconn { /* type == OFCONN_PRIMARY only. */ enum nx_role role; /* Role. */ struct hmap_node hmap_node; /* In struct ofproto's "controllers" map. */ - struct discovery *discovery; /* Controller discovery object, if enabled. */ - struct status_category *ss; /* Switch status category. */ enum ofproto_band band; /* In-band or out-of-band? */ }; -/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's - * "schedulers" array. Their values are 0 and 1, and their meanings and values - * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient. In - * case anything ever changes, check their values here. 
*/ -#define N_SCHEDULERS 2 -BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0); -BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR); -BUILD_ASSERT_DECL(OFPR_ACTION == 1); -BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR); static struct ofconn *ofconn_create(struct ofproto *, struct rconn *, enum ofconn_type); static void ofconn_destroy(struct ofconn *); static void ofconn_run(struct ofconn *); static void ofconn_wait(struct ofconn *); + static bool ofconn_receives_async_msgs(const struct ofconn *); static char *ofconn_make_name(const struct ofproto *, const char *target); static void ofconn_set_rate_limit(struct ofconn *, int rate, int burst); +static struct ofproto *ofconn_get_ofproto(struct ofconn *); + +static enum nx_flow_format ofconn_get_flow_format(struct ofconn *); +static void ofconn_set_flow_format(struct ofconn *, enum nx_flow_format); + +static int ofconn_get_miss_send_len(const struct ofconn *); +static void ofconn_set_miss_send_len(struct ofconn *, int miss_send_len); + +static enum ofconn_type ofconn_get_type(const struct ofconn *); + +static enum nx_role ofconn_get_role(const struct ofconn *); +static void ofconn_set_role(struct ofconn *, enum nx_role); + static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn, struct rconn_packet_counter *counter); -static void send_packet_in(struct ofproto *, struct ofpbuf *odp_msg); -static void do_send_packet_in(struct ofpbuf *odp_msg, void *ofconn); +static void send_packet_in(struct ofproto *, struct dpif_upcall *, + const struct flow *, bool clone); +static void do_send_packet_in(struct ofpbuf *ofp_packet_in, void *ofconn); struct ofproto { /* Settings. */ @@ -360,7 +386,6 @@ struct ofproto { uint32_t max_ports; /* Configuration. */ - struct switch_status *switch_status; struct fail_open *fail_open; struct netflow *netflow; struct ofproto_sflow *sflow; @@ -410,8 +435,11 @@ static uint64_t pick_datapath_id(const struct ofproto *); static uint64_t pick_fallback_dpid(void); static int ofproto_expire(struct ofproto *); +static void flow_push_stats(struct ofproto *, const struct rule *, + struct flow *, uint64_t packets, uint64_t bytes, + long long int used); -static void handle_odp_msg(struct ofproto *, struct ofpbuf *); +static void handle_upcall(struct ofproto *, struct dpif_upcall *); static void handle_openflow(struct ofconn *, struct ofpbuf *); @@ -427,7 +455,6 @@ ofproto_create(const char *datapath, const char *datapath_type, const struct ofhooks *ofhooks, void *aux, struct ofproto **ofprotop) { - struct odp_stats stats; struct ofproto *p; struct dpif *dpif; int error; @@ -442,14 +469,10 @@ ofproto_create(const char *datapath, const char *datapath_type, VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error)); return error; } - error = dpif_get_dp_stats(dpif, &stats); - if (error) { - VLOG_ERR("failed to obtain stats for datapath %s: %s", - datapath, strerror(error)); - dpif_close(dpif); - return error; - } - error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW); + error = dpif_recv_set_mask(dpif, + ((1u << DPIF_UC_MISS) | + (1u << DPIF_UC_ACTION) | + (1u << DPIF_UC_SAMPLE))); if (error) { VLOG_ERR("failed to listen on datapath %s: %s", datapath, strerror(error)); @@ -474,10 +497,9 @@ ofproto_create(const char *datapath, const char *datapath_type, p->netdev_monitor = netdev_monitor_create(); hmap_init(&p->ports); shash_init(&p->port_by_name); - p->max_ports = stats.max_ports; + p->max_ports = dpif_get_max_ports(dpif); /* Initialize submodules. 
*/ - p->switch_status = switch_status_create(p); p->fail_open = NULL; p->netflow = NULL; p->sflow = NULL; @@ -537,82 +559,47 @@ ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id) } } -static bool -is_discovery_controller(const struct ofproto_controller *c) -{ - return !strcmp(c->target, "discover"); -} - -static bool -is_in_band_controller(const struct ofproto_controller *c) -{ - return is_discovery_controller(c) || c->band == OFPROTO_IN_BAND; -} - /* Creates a new controller in 'ofproto'. Some of the settings are initially * drawn from 'c', but update_controller() needs to be called later to finish * the new ofconn's configuration. */ static void add_controller(struct ofproto *ofproto, const struct ofproto_controller *c) { - struct discovery *discovery; + char *name = ofconn_make_name(ofproto, c->target); struct ofconn *ofconn; - if (is_discovery_controller(c)) { - int error = discovery_create(c->accept_re, c->update_resolv_conf, - ofproto->dpif, ofproto->switch_status, - &discovery); - if (error) { - return; - } - } else { - discovery = NULL; - } - ofconn = ofconn_create(ofproto, rconn_create(5, 8), OFCONN_PRIMARY); ofconn->pktbuf = pktbuf_create(); ofconn->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN; - if (discovery) { - ofconn->discovery = discovery; - } else { - char *name = ofconn_make_name(ofproto, c->target); - rconn_connect(ofconn->rconn, c->target, name); - free(name); - } + rconn_connect(ofconn->rconn, c->target, name); hmap_insert(&ofproto->controllers, &ofconn->hmap_node, hash_string(c->target, 0)); + + free(name); } /* Reconfigures 'ofconn' to match 'c'. This function cannot update an ofconn's - * target or turn discovery on or off (these are done by creating new ofconns - * and deleting old ones), but it can update the rest of an ofconn's - * settings. */ + * target (this is done by creating new ofconns and deleting old ones), but it + * can update the rest of an ofconn's settings. */ static void update_controller(struct ofconn *ofconn, const struct ofproto_controller *c) { int probe_interval; - ofconn->band = (is_in_band_controller(c) - ? OFPROTO_IN_BAND : OFPROTO_OUT_OF_BAND); + ofconn->band = c->band; rconn_set_max_backoff(ofconn->rconn, c->max_backoff); probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0; rconn_set_probe_interval(ofconn->rconn, probe_interval); - if (ofconn->discovery) { - discovery_set_update_resolv_conf(ofconn->discovery, - c->update_resolv_conf); - discovery_set_accept_controller_re(ofconn->discovery, c->accept_re); - } - ofconn_set_rate_limit(ofconn, c->rate_limit, c->burst_limit); } static const char * ofconn_get_target(const struct ofconn *ofconn) { - return ofconn->discovery ? "discover" : rconn_get_target(ofconn->rconn); + return rconn_get_target(ofconn->rconn); } static struct ofconn * @@ -635,7 +622,6 @@ update_in_band_remotes(struct ofproto *ofproto) const struct ofconn *ofconn; struct sockaddr_in *addrs; size_t max_addrs, n_addrs; - bool discovery; size_t i; /* Allocate enough memory for as many remotes as we could possibly have. */ @@ -644,7 +630,6 @@ update_in_band_remotes(struct ofproto *ofproto) n_addrs = 0; /* Add all the remotes. 
*/ - discovery = false; HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) { struct sockaddr_in *sin = &addrs[n_addrs]; @@ -657,23 +642,15 @@ update_in_band_remotes(struct ofproto *ofproto) sin->sin_port = rconn_get_remote_port(ofconn->rconn); n_addrs++; } - if (ofconn->discovery) { - discovery = true; - } } for (i = 0; i < ofproto->n_extra_remotes; i++) { addrs[n_addrs++] = ofproto->extra_in_band_remotes[i]; } - /* Create or update or destroy in-band. - * - * Ordinarily we only enable in-band if there's at least one remote - * address, but discovery needs the in-band rules for DHCP to be installed - * even before we know any remote addresses. */ - if (n_addrs || discovery) { + /* Create or update or destroy in-band. */ + if (n_addrs) { if (!ofproto->in_band) { - in_band_create(ofproto, ofproto->dpif, ofproto->switch_status, - &ofproto->in_band); + in_band_create(ofproto, ofproto->dpif, &ofproto->in_band); } if (ofproto->in_band) { in_band_set_remotes(ofproto->in_band, addrs, n_addrs); @@ -700,7 +677,7 @@ update_fail_open(struct ofproto *p) size_t n; if (!p->fail_open) { - p->fail_open = fail_open_create(p, p->switch_status); + p->fail_open = fail_open_create(p); } n = 0; @@ -725,7 +702,6 @@ ofproto_set_controllers(struct ofproto *p, struct shash new_controllers; struct ofconn *ofconn, *next_ofconn; struct ofservice *ofservice, *next_ofservice; - bool ss_exists; size_t i; /* Create newly configured controllers and services. @@ -734,7 +710,7 @@ ofproto_set_controllers(struct ofproto *p, for (i = 0; i < n_controllers; i++) { const struct ofproto_controller *c = &controllers[i]; - if (!vconn_verify_name(c->target) || !strcmp(c->target, "discover")) { + if (!vconn_verify_name(c->target)) { if (!find_controller_by_target(p, c->target)) { add_controller(p, c); } @@ -753,7 +729,6 @@ ofproto_set_controllers(struct ofproto *p, /* Delete controllers that are no longer configured. * Update configuration of all now-existing controllers. */ - ss_exists = false; HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &p->controllers) { struct ofproto_controller *c; @@ -762,9 +737,6 @@ ofproto_set_controllers(struct ofproto *p, ofconn_destroy(ofconn); } else { update_controller(ofconn, c); - if (ofconn->ss) { - ss_exists = true; - } } } @@ -786,13 +758,6 @@ ofproto_set_controllers(struct ofproto *p, update_in_band_remotes(p); update_fail_open(p); - - if (!hmap_is_empty(&p->controllers) && !ss_exists) { - ofconn = CONTAINER_OF(hmap_first(&p->controllers), - struct ofconn, hmap_node); - ofconn->ss = switch_status_register(p->switch_status, "remote", - rconn_status_cb, ofconn->rconn); - } } void @@ -999,7 +964,70 @@ ofproto_set_sflow(struct ofproto *ofproto, ofproto->sflow = NULL; } } + +/* Connectivity Fault Management configuration. */ + +/* Clears the CFM configuration from 'port_no' on 'ofproto'. */ +void +ofproto_iface_clear_cfm(struct ofproto *ofproto, uint32_t port_no) +{ + struct ofport *ofport = get_port(ofproto, port_no); + if (ofport && ofport->cfm){ + cfm_destroy(ofport->cfm); + ofport->cfm = NULL; + } +} +/* Configures connectivity fault management on 'port_no' in 'ofproto'. Takes + * basic configuration from the configuration members in 'cfm', and the set of + * remote maintenance points from the 'n_remote_mps' elements in 'remote_mps'. + * Ignores the statistics members of 'cfm'. + * + * This function has no effect if 'ofproto' does not have a port 'port_no'. 
*/ +void +ofproto_iface_set_cfm(struct ofproto *ofproto, uint32_t port_no, + const struct cfm *cfm, + const uint16_t *remote_mps, size_t n_remote_mps) +{ + struct ofport *ofport; + + ofport = get_port(ofproto, port_no); + if (!ofport) { + VLOG_WARN("%s: cannot configure CFM on nonexistent port %"PRIu32, + dpif_name(ofproto->dpif), port_no); + return; + } + + if (!ofport->cfm) { + ofport->cfm = cfm_create(); + } + + ofport->cfm->mpid = cfm->mpid; + ofport->cfm->interval = cfm->interval; + memcpy(ofport->cfm->maid, cfm->maid, CCM_MAID_LEN); + + cfm_update_remote_mps(ofport->cfm, remote_mps, n_remote_mps); + + if (!cfm_configure(ofport->cfm)) { + VLOG_WARN("%s: CFM configuration on port %"PRIu32" (%s) failed", + dpif_name(ofproto->dpif), port_no, + netdev_get_name(ofport->netdev)); + cfm_destroy(ofport->cfm); + ofport->cfm = NULL; + } +} + +/* Returns the connectivity fault management object associated with 'port_no' + * within 'ofproto', or a null pointer if 'ofproto' does not have a port + * 'port_no' or if that port does not have CFM configured. The caller must not + * modify or destroy the returned object. */ +const struct cfm * +ofproto_iface_get_cfm(struct ofproto *ofproto, uint32_t port_no) +{ + struct ofport *ofport = get_port(ofproto, port_no); + return ofport ? ofport->cfm : NULL; +} + uint64_t ofproto_get_datapath_id(const struct ofproto *ofproto) { @@ -1067,7 +1095,6 @@ ofproto_destroy(struct ofproto *p) } shash_destroy(&p->port_by_name); - switch_status_destroy(p->switch_status); netflow_destroy(p->netflow); ofproto_sflow_destroy(p->sflow); @@ -1121,7 +1148,7 @@ process_port_change(struct ofproto *ofproto, int error, char *devname) static int snoop_preference(const struct ofconn *ofconn) { - switch (ofconn->role) { + switch (ofconn_get_role(ofconn)) { case NX_ROLE_MASTER: return 3; case NX_ROLE_OTHER: @@ -1144,7 +1171,7 @@ add_snooper(struct ofproto *ofproto, struct vconn *vconn) /* Pick a controller for monitoring. */ best = NULL; LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) { - if (ofconn->type == OFCONN_PRIMARY + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY && (!best || snoop_preference(ofconn) > snoop_preference(best))) { best = ofconn; } @@ -1163,6 +1190,7 @@ ofproto_run1(struct ofproto *p) { struct ofconn *ofconn, *next_ofconn; struct ofservice *ofservice; + struct ofport *ofport; char *devname; int error; int i; @@ -1172,9 +1200,9 @@ ofproto_run1(struct ofproto *p) } for (i = 0; i < 50; i++) { - struct ofpbuf *buf; + struct dpif_upcall packet; - error = dpif_recv(p->dpif, &buf); + error = dpif_recv(p->dpif, &packet); if (error) { if (error == ENODEV) { /* Someone destroyed the datapath behind our back. 
The caller @@ -1188,7 +1216,7 @@ ofproto_run1(struct ofproto *p) break; } - handle_odp_msg(p, buf); + handle_upcall(p, &packet); } while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) { @@ -1199,6 +1227,10 @@ ofproto_run1(struct ofproto *p) process_port_change(p, error, devname); } + HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { + ofport_run(p, ofport); + } + if (p->in_band) { if (time_msec() >= p->next_in_band_update) { update_in_band_remotes(p); @@ -1299,11 +1331,15 @@ ofproto_wait(struct ofproto *p) { struct ofservice *ofservice; struct ofconn *ofconn; + struct ofport *ofport; size_t i; dpif_recv_wait(p->dpif); dpif_port_poll_wait(p->dpif); netdev_monitor_poll_wait(p->netdev_monitor); + HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { + ofport_wait(ofport); + } LIST_FOR_EACH (ofconn, node, &p->all_conns) { ofconn_wait(ofconn); } @@ -1353,6 +1389,68 @@ ofproto_is_alive(const struct ofproto *p) return !hmap_is_empty(&p->controllers); } +void +ofproto_get_ofproto_controller_info(const struct ofproto *ofproto, + struct shash *info) +{ + const struct ofconn *ofconn; + + shash_init(info); + + HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) { + const struct rconn *rconn = ofconn->rconn; + time_t now = time_now(); + time_t last_connection = rconn_get_last_connection(rconn); + time_t last_disconnect = rconn_get_last_disconnect(rconn); + const int last_error = rconn_get_last_error(rconn); + struct ofproto_controller_info *cinfo = xmalloc(sizeof *cinfo); + + shash_add(info, rconn_get_target(rconn), cinfo); + + cinfo->is_connected = rconn_is_connected(rconn); + cinfo->role = ofconn_get_role(ofconn); + + cinfo->pairs.n = 0; + + if (last_error) { + cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; + cinfo->pairs.values[cinfo->pairs.n++] = + xstrdup(ovs_retval_to_string(last_error)); + } + + cinfo->pairs.keys[cinfo->pairs.n] = "state"; + cinfo->pairs.values[cinfo->pairs.n++] = + xstrdup(rconn_get_state(rconn)); + + if (last_connection != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_connect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_connection)); + } + + if (last_disconnect != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_disconnect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_disconnect)); + } + } +} + +void +ofproto_free_ofproto_controller_info(struct shash *info) +{ + struct shash_node *node; + + SHASH_FOR_EACH (node, info) { + struct ofproto_controller_info *cinfo = node->data; + while (cinfo->pairs.n) { + free((char *) cinfo->pairs.values[--cinfo->pairs.n]); + } + free(cinfo); + } + shash_destroy(info); +} + /* Deletes port number 'odp_port' from the datapath for 'ofproto'. * * This is almost the same as calling dpif_port_del() directly on the @@ -1395,24 +1493,34 @@ ofproto_port_is_floodable(struct ofproto *ofproto, uint16_t odp_port) return ofport && !(ofport->opp.config & OFPPC_NO_FLOOD); } +/* Sends 'packet' out of port 'port_no' within 'p'. If 'vlan_tci' is zero the + * packet will not have any 802.1Q hader; if it is nonzero, then the packet + * will be sent with the VLAN TCI specified by 'vlan_tci & ~VLAN_CFI'. + * + * Returns 0 if successful, otherwise a positive errno value. 
*/ int -ofproto_send_packet(struct ofproto *p, const struct flow *flow, - const union ofp_action *actions, size_t n_actions, +ofproto_send_packet(struct ofproto *ofproto, + uint32_t port_no, uint16_t vlan_tci, const struct ofpbuf *packet) { - struct action_xlate_ctx ctx; - struct ofpbuf *odp_actions; - - action_xlate_ctx_init(&ctx, p, flow, packet); - odp_actions = xlate_actions(&ctx, actions, n_actions); - - /* XXX Should we translate the dpif_execute() errno value into an OpenFlow - * error code? */ - dpif_execute(p->dpif, odp_actions->data, odp_actions->size, packet); + struct ofpbuf odp_actions; + int error; - ofpbuf_delete(odp_actions); + ofpbuf_init(&odp_actions, 32); + if (vlan_tci != 0) { + nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_SET_DL_TCI, + ntohs(vlan_tci & ~VLAN_CFI)); + } + nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_OUTPUT, port_no); + error = dpif_execute(ofproto->dpif, odp_actions.data, odp_actions.size, + packet); + ofpbuf_uninit(&odp_actions); - return 0; + if (error) { + VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)", + dpif_name(ofproto->dpif), port_no, strerror(error)); + } + return error; } /* Adds a flow to the OpenFlow flow table in 'p' that matches 'cls_rule' and @@ -1460,6 +1568,8 @@ ofproto_flush_flows(struct ofproto *ofproto) * individually since we are about to blow away all the facets with * dpif_flow_flush(). */ facet->installed = false; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; facet_remove(ofproto, facet); } @@ -1480,33 +1590,30 @@ ofproto_flush_flows(struct ofproto *ofproto) static void reinit_ports(struct ofproto *p) { - struct svec devnames; + struct dpif_port_dump dump; + struct shash_node *node; + struct shash devnames; struct ofport *ofport; - struct odp_port *odp_ports; - size_t n_odp_ports; - size_t i; + struct dpif_port dpif_port; COVERAGE_INC(ofproto_reinit_ports); - svec_init(&devnames); + shash_init(&devnames); HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { - svec_add (&devnames, ofport->opp.name); + shash_add_once (&devnames, ofport->opp.name, NULL); } - dpif_port_list(p->dpif, &odp_ports, &n_odp_ports); - for (i = 0; i < n_odp_ports; i++) { - svec_add (&devnames, odp_ports[i].devname); + DPIF_PORT_FOR_EACH (&dpif_port, &dump, p->dpif) { + shash_add_once (&devnames, dpif_port.name, NULL); } - free(odp_ports); - svec_sort_unique(&devnames); - for (i = 0; i < devnames.n; i++) { - update_port(p, devnames.names[i]); + SHASH_FOR_EACH (node, &devnames) { + update_port(p, node->name); } - svec_destroy(&devnames); + shash_destroy(&devnames); } static struct ofport * -make_ofport(const struct odp_port *odp_port) +make_ofport(const struct dpif_port *dpif_port) { struct netdev_options netdev_options; enum netdev_flags flags; @@ -1515,27 +1622,25 @@ make_ofport(const struct odp_port *odp_port) int error; memset(&netdev_options, 0, sizeof netdev_options); - netdev_options.name = odp_port->devname; - netdev_options.type = odp_port->type; + netdev_options.name = dpif_port->name; + netdev_options.type = dpif_port->type; netdev_options.ethertype = NETDEV_ETH_TYPE_NONE; error = netdev_open(&netdev_options, &netdev); if (error) { VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s " "cannot be opened (%s)", - odp_port->devname, odp_port->port, - odp_port->devname, strerror(error)); + dpif_port->name, dpif_port->port_no, + dpif_port->name, strerror(error)); return NULL; } - ofport = xmalloc(sizeof *ofport); + ofport = xzalloc(sizeof *ofport); ofport->netdev = netdev; - ofport->odp_port = odp_port->port; - 
ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port); + ofport->odp_port = dpif_port->port_no; + ofport->opp.port_no = odp_port_to_ofp_port(dpif_port->port_no); netdev_get_etheraddr(netdev, ofport->opp.hw_addr); - memcpy(ofport->opp.name, odp_port->devname, - MIN(sizeof ofport->opp.name, sizeof odp_port->devname)); - ofport->opp.name[sizeof ofport->opp.name - 1] = '\0'; + ovs_strlcpy(ofport->opp.name, dpif_port->name, sizeof ofport->opp.name); netdev_get_flags(netdev, &flags); ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN; @@ -1549,15 +1654,15 @@ make_ofport(const struct odp_port *odp_port) } static bool -ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port) +ofport_conflicts(const struct ofproto *p, const struct dpif_port *dpif_port) { - if (get_port(p, odp_port->port)) { + if (get_port(p, dpif_port->port_no)) { VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath", - odp_port->port); + dpif_port->port_no); return true; - } else if (shash_find(&p->port_by_name, odp_port->devname)) { + } else if (shash_find(&p->port_by_name, dpif_port->name)) { VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath", - odp_port->devname); + dpif_port->name); return true; } else { return false; @@ -1594,7 +1699,7 @@ send_port_status(struct ofproto *p, const struct ofport *ofport, /* Primary controllers, even slaves, should always get port status updates. Otherwise obey ofconn_receives_async_msgs(). */ - if (ofconn->type != OFCONN_PRIMARY + if (ofconn_get_type(ofconn) != OFCONN_PRIMARY && !ofconn_receives_async_msgs(ofconn)) { continue; } @@ -1632,10 +1737,39 @@ ofport_remove(struct ofproto *p, struct ofport *ofport) } } +static void +ofport_run(struct ofproto *ofproto, struct ofport *ofport) +{ + if (ofport->cfm) { + cfm_run(ofport->cfm); + + if (cfm_should_send_ccm(ofport->cfm)) { + struct ofpbuf packet; + struct ccm *ccm; + + ofpbuf_init(&packet, 0); + ccm = compose_packet(&packet, eth_addr_ccm, ofport->opp.hw_addr, + ETH_TYPE_CFM, sizeof *ccm); + cfm_compose_ccm(ofport->cfm, ccm); + ofproto_send_packet(ofproto, ofport->odp_port, 0, &packet); + ofpbuf_uninit(&packet); + } + } +} + +static void +ofport_wait(struct ofport *ofport) +{ + if (ofport->cfm) { + cfm_wait(ofport->cfm); + } +} + static void ofport_free(struct ofport *ofport) { if (ofport) { + cfm_destroy(ofport->cfm); netdev_close(ofport->netdev); free(ofport); } @@ -1658,7 +1792,7 @@ get_port(const struct ofproto *ofproto, uint16_t odp_port) static void update_port(struct ofproto *p, const char *devname) { - struct odp_port odp_port; + struct dpif_port dpif_port; struct ofport *old_ofport; struct ofport *new_ofport; int error; @@ -1666,7 +1800,7 @@ update_port(struct ofproto *p, const char *devname) COVERAGE_INC(ofproto_update_port); /* Query the datapath for port information. */ - error = dpif_port_query_by_name(p->dpif, devname, &odp_port); + error = dpif_port_query_by_name(p->dpif, devname, &dpif_port); /* Find the old ofport. */ old_ofport = shash_find_data(&p->port_by_name, devname); @@ -1682,20 +1816,20 @@ update_port(struct ofproto *p, const char *devname) * reliably but more portably by comparing the old port's MAC * against the new port's MAC. However, this code isn't that smart * and always sends an OFPPR_MODIFY (XXX). 
*/ - old_ofport = get_port(p, odp_port.port); + old_ofport = get_port(p, dpif_port.port_no); } } else if (error != ENOENT && error != ENODEV) { VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error " "%s", strerror(error)); - return; + goto exit; } /* Create a new ofport. */ - new_ofport = !error ? make_ofport(&odp_port) : NULL; + new_ofport = !error ? make_ofport(&dpif_port) : NULL; /* Eliminate a few pathological cases. */ if (!old_ofport && !new_ofport) { - return; + goto exit; } else if (old_ofport && new_ofport) { /* Most of the 'config' bits are OpenFlow soft state, but * OFPPC_PORT_DOWN is maintained by the kernel. So transfer the @@ -1706,7 +1840,7 @@ update_port(struct ofproto *p, const char *devname) if (ofport_equal(old_ofport, new_ofport)) { /* False alarm--no change. */ ofport_free(new_ofport); - return; + goto exit; } } @@ -1722,31 +1856,26 @@ update_port(struct ofproto *p, const char *devname) : !new_ofport ? OFPPR_DELETE : OFPPR_MODIFY)); ofport_free(old_ofport); + +exit: + dpif_port_destroy(&dpif_port); } static int init_ports(struct ofproto *p) { - struct odp_port *ports; - size_t n_ports; - size_t i; - int error; - - error = dpif_port_list(p->dpif, &ports, &n_ports); - if (error) { - return error; - } + struct dpif_port_dump dump; + struct dpif_port dpif_port; - for (i = 0; i < n_ports; i++) { - const struct odp_port *odp_port = &ports[i]; - if (!ofport_conflicts(p, odp_port)) { - struct ofport *ofport = make_ofport(odp_port); + DPIF_PORT_FOR_EACH (&dpif_port, &dump, p->dpif) { + if (!ofport_conflicts(p, &dpif_port)) { + struct ofport *ofport = make_ofport(&dpif_port); if (ofport) { ofport_install(p, ofport); } } } - free(ports); + return 0; } @@ -1770,13 +1899,13 @@ ofconn_create(struct ofproto *p, struct rconn *rconn, enum ofconn_type type) static void ofconn_destroy(struct ofconn *ofconn) { - if (ofconn->type == OFCONN_PRIMARY) { - hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node); + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); + + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY) { + hmap_remove(&ofproto->controllers, &ofconn->hmap_node); } - discovery_destroy(ofconn->discovery); list_remove(&ofconn->node); - switch_status_unregister(ofconn->ss); rconn_destroy(ofconn->rconn); rconn_packet_counter_destroy(ofconn->packet_in_counter); rconn_packet_counter_destroy(ofconn->reply_counter); @@ -1787,26 +1916,10 @@ ofconn_destroy(struct ofconn *ofconn) static void ofconn_run(struct ofconn *ofconn) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); int iteration; size_t i; - if (ofconn->discovery) { - char *controller_name; - if (rconn_is_connectivity_questionable(ofconn->rconn)) { - discovery_question_connectivity(ofconn->discovery); - } - if (discovery_run(ofconn->discovery, &controller_name)) { - if (controller_name) { - char *ofconn_name = ofconn_make_name(p, controller_name); - rconn_connect(ofconn->rconn, controller_name, ofconn_name); - free(ofconn_name); - } else { - rconn_disconnect(ofconn->rconn); - } - } - } - for (i = 0; i < N_SCHEDULERS; i++) { pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn); } @@ -1829,7 +1942,7 @@ ofconn_run(struct ofconn *ofconn) } } - if (!ofconn->discovery && !rconn_is_alive(ofconn->rconn)) { + if (!rconn_is_alive(ofconn->rconn)) { ofconn_destroy(ofconn); } } @@ -1839,9 +1952,6 @@ ofconn_wait(struct ofconn *ofconn) { int i; - if (ofconn->discovery) { - discovery_wait(ofconn->discovery); - } for (i = 0; i < N_SCHEDULERS; i++) { 
pinsched_wait(ofconn->schedulers[i]); } @@ -1857,10 +1967,10 @@ ofconn_wait(struct ofconn *ofconn) static bool ofconn_receives_async_msgs(const struct ofconn *ofconn) { - if (ofconn->type == OFCONN_PRIMARY) { + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY) { /* Primary controllers always get asynchronous messages unless they * have configured themselves as "slaves". */ - return ofconn->role != NX_ROLE_SLAVE; + return ofconn_get_role(ofconn) != NX_ROLE_SLAVE; } else { /* Service connections don't get asynchronous messages unless they have * explicitly asked for them by setting a nonzero miss send length. */ @@ -1890,8 +2000,7 @@ ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst) if (rate > 0) { if (!*s) { - *s = pinsched_create(rate, burst, - ofconn->ofproto->switch_status); + *s = pinsched_create(rate, burst); } else { pinsched_set_limits(*s, rate, burst); } @@ -1901,6 +2010,54 @@ ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst) } } } + +static struct ofproto * +ofconn_get_ofproto(struct ofconn *ofconn) +{ + return ofconn->ofproto; +} + +static enum nx_flow_format +ofconn_get_flow_format(struct ofconn *ofconn) +{ + return ofconn->flow_format; +} + +static void +ofconn_set_flow_format(struct ofconn *ofconn, enum nx_flow_format flow_format) +{ + ofconn->flow_format = flow_format; +} + +static int +ofconn_get_miss_send_len(const struct ofconn *ofconn) +{ + return ofconn->miss_send_len; +} + +static void +ofconn_set_miss_send_len(struct ofconn *ofconn, int miss_send_len) +{ + ofconn->miss_send_len = miss_send_len; +} + +static enum ofconn_type +ofconn_get_type(const struct ofconn *ofconn) +{ + return ofconn->type; +} + +static enum nx_role +ofconn_get_role(const struct ofconn *ofconn) +{ + return ofconn->role; +} + +static void +ofconn_set_role(struct ofconn *ofconn, enum nx_role role) +{ + ofconn->role = role; +} static void ofservice_reconfigure(struct ofservice *ofservice, @@ -2049,25 +2206,27 @@ rule_has_out_port(const struct rule *rule, ovs_be16 out_port) * * Takes ownership of 'packet'. */ static bool -execute_odp_actions(struct ofproto *ofproto, uint16_t in_port, - const struct nlattr *odp_actions, unsigned int actions_len, +execute_odp_actions(struct ofproto *ofproto, const struct flow *flow, + const struct nlattr *odp_actions, size_t actions_len, struct ofpbuf *packet) { - if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint32_t)) - && odp_actions->nla_type == ODPAT_CONTROLLER) { + if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)) + && odp_actions->nla_type == ODP_ACTION_ATTR_CONTROLLER) { /* As an optimization, avoid a round-trip from userspace to kernel to * userspace. This also avoids possibly filling up kernel packet * buffers along the way. 
*/ - struct odp_msg *msg; + struct dpif_upcall upcall; - msg = ofpbuf_push_uninit(packet, sizeof *msg); - msg->type = _ODPL_ACTION_NR; - msg->length = sizeof(struct odp_msg) + packet->size; - msg->port = in_port; - msg->reserved = 0; - msg->arg = nl_attr_get_u32(odp_actions); + upcall.type = DPIF_UC_ACTION; + upcall.packet = packet; + upcall.key = NULL; + upcall.key_len = 0; + upcall.userdata = nl_attr_get_u64(odp_actions); + upcall.sample_pool = 0; + upcall.actions = NULL; + upcall.actions_len = 0; - send_packet_in(ofproto, packet); + send_packet_in(ofproto, &upcall, flow, false); return true; } else { @@ -2095,17 +2254,15 @@ static void facet_execute(struct ofproto *ofproto, struct facet *facet, struct ofpbuf *packet) { - struct odp_flow_stats stats; + struct dpif_flow_stats stats; assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in)); flow_extract_stats(&facet->flow, packet, &stats); - if (execute_odp_actions(ofproto, facet->flow.in_port, + stats.used = time_msec(); + if (execute_odp_actions(ofproto, &facet->flow, facet->actions, facet->actions_len, packet)) { facet_update_stats(ofproto, facet, &stats); - facet->used = time_msec(); - netflow_flow_update_time(ofproto->netflow, - &facet->nf_flow, facet->used); } } @@ -2152,11 +2309,12 @@ rule_execute(struct ofproto *ofproto, struct rule *rule, uint16_t in_port, action_xlate_ctx_init(&ctx, ofproto, &flow, packet); odp_actions = xlate_actions(&ctx, rule->actions, rule->n_actions); size = packet->size; - if (execute_odp_actions(ofproto, in_port, odp_actions->data, + if (execute_odp_actions(ofproto, &flow, odp_actions->data, odp_actions->size, packet)) { rule->used = time_msec(); rule->packet_count++; rule->byte_count += size; + flow_push_stats(ofproto, rule, &flow, 1, size, rule->used); } ofpbuf_delete(odp_actions); } @@ -2251,6 +2409,9 @@ facet_make_actions(struct ofproto *p, struct facet *facet, action_xlate_ctx_init(&ctx, p, &facet->flow, packet); odp_actions = xlate_actions(&ctx, rule->actions, rule->n_actions); + facet->tags = ctx.tags; + facet->may_install = ctx.may_set_up_flow; + facet->nf_flow.output_iface = ctx.nf_output_iface; if (facet->actions_len != odp_actions->size || memcmp(facet->actions, odp_actions->data, odp_actions->size)) { @@ -2263,16 +2424,27 @@ facet_make_actions(struct ofproto *p, struct facet *facet, } static int -facet_put__(struct ofproto *ofproto, struct facet *facet, int flags, - struct odp_flow_put *put) +facet_put__(struct ofproto *ofproto, struct facet *facet, + const struct nlattr *actions, size_t actions_len, + struct dpif_flow_stats *stats) { - memset(&put->flow.stats, 0, sizeof put->flow.stats); - odp_flow_key_from_flow(&put->flow.key, &facet->flow); - put->flow.actions = facet->actions; - put->flow.actions_len = facet->actions_len; - put->flow.flags = 0; - put->flags = flags; - return dpif_flow_put(ofproto->dpif, put); + uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; + enum dpif_flow_put_flags flags; + struct ofpbuf key; + + flags = DPIF_FP_CREATE | DPIF_FP_MODIFY; + if (stats) { + flags |= DPIF_FP_ZERO_STATS; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; + } + + ofpbuf_use_stack(&key, keybuf, sizeof keybuf); + odp_flow_key_from_flow(&key, &facet->flow); + assert(key.base == keybuf); + + return dpif_flow_put(ofproto->dpif, flags, key.data, key.size, + actions, actions_len, stats); } /* If 'facet' is installable, inserts or re-inserts it into 'p''s datapath. 
If @@ -2281,17 +2453,12 @@ facet_put__(struct ofproto *ofproto, struct facet *facet, int flags, static void facet_install(struct ofproto *p, struct facet *facet, bool zero_stats) { - if (facet->may_install) { - struct odp_flow_put put; - int flags; + struct dpif_flow_stats stats; - flags = ODPPF_CREATE | ODPPF_MODIFY; - if (zero_stats) { - flags |= ODPPF_ZERO_STATS; - } - if (!facet_put__(p, facet, flags, &put)) { - facet->installed = true; - } + if (facet->may_install + && !facet_put__(p, facet, facet->actions, facet->actions_len, + zero_stats ? &stats : NULL)) { + facet->installed = true; } } @@ -2318,16 +2485,23 @@ static void facet_uninstall(struct ofproto *p, struct facet *facet) { if (facet->installed) { - struct odp_flow odp_flow; - - odp_flow_key_from_flow(&odp_flow.key, &facet->flow); - odp_flow.actions = NULL; - odp_flow.actions_len = 0; - odp_flow.flags = 0; - if (!dpif_flow_del(p->dpif, &odp_flow)) { - facet_update_stats(p, facet, &odp_flow.stats); + uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; + struct dpif_flow_stats stats; + struct ofpbuf key; + + ofpbuf_use_stack(&key, keybuf, sizeof keybuf); + odp_flow_key_from_flow(&key, &facet->flow); + assert(key.base == keybuf); + + if (!dpif_flow_del(p->dpif, key.data, key.size, &stats)) { + facet_update_stats(p, facet, &stats); } facet->installed = false; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; + } else { + assert(facet->dp_packet_count == 0); + assert(facet->dp_byte_count == 0); } } @@ -2344,10 +2518,16 @@ facet_is_controller_flow(struct facet *facet) } /* Folds all of 'facet''s statistics into its rule. Also updates the - * accounting ofhook and emits a NetFlow expiration if appropriate. */ + * accounting ofhook and emits a NetFlow expiration if appropriate. All of + * 'facet''s statistics in the datapath should have been zeroed and folded into + * its packet and byte counts before this function is called. */ static void facet_flush_stats(struct ofproto *ofproto, struct facet *facet) { + assert(!facet->dp_byte_count); + assert(!facet->dp_packet_count); + + facet_push_stats(ofproto, facet); facet_account(ofproto, facet, 0); if (ofproto->netflow && !facet_is_controller_flow(facet)) { @@ -2366,6 +2546,8 @@ facet_flush_stats(struct ofproto *ofproto, struct facet *facet) * reinstalled. */ facet->packet_count = 0; facet->byte_count = 0; + facet->rs_packet_count = 0; + facet->rs_byte_count = 0; facet->accounted_bytes = 0; netflow_flow_clear(&facet->nf_flow); @@ -2454,19 +2636,13 @@ facet_revalidate(struct ofproto *ofproto, struct facet *facet) /* If the ODP actions changed or the installability changed, then we need * to talk to the datapath. 
*/ - if (actions_changed || facet->may_install != facet->installed) { - if (facet->may_install) { - struct odp_flow_put put; - - memset(&put.flow.stats, 0, sizeof put.flow.stats); - odp_flow_key_from_flow(&put.flow.key, &facet->flow); - put.flow.actions = odp_actions->data; - put.flow.actions_len = odp_actions->size; - put.flow.flags = 0; - put.flags = ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS; - dpif_flow_put(ofproto->dpif, &put); - - facet_update_stats(ofproto, facet, &put.flow.stats); + if (actions_changed || ctx.may_set_up_flow != facet->installed) { + if (ctx.may_set_up_flow) { + struct dpif_flow_stats stats; + + facet_put__(ofproto, facet, + odp_actions->data, odp_actions->size, &stats); + facet_update_stats(ofproto, facet, &stats); } else { facet_uninstall(ofproto, facet); } @@ -2476,8 +2652,6 @@ facet_revalidate(struct ofproto *ofproto, struct facet *facet) facet_flush_stats(ofproto, facet); } - ofpbuf_delete(odp_actions); - /* Update 'facet' now that we've taken care of all the old state. */ facet->tags = ctx.tags; facet->nf_flow.output_iface = ctx.nf_output_iface; @@ -2493,8 +2667,11 @@ facet_revalidate(struct ofproto *ofproto, struct facet *facet) list_push_back(&new_rule->facets, &facet->list_node); facet->rule = new_rule; facet->used = new_rule->created; + facet->rs_used = facet->used; } + ofpbuf_delete(odp_actions); + return true; } @@ -2508,45 +2685,40 @@ queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn, } } +static void +ofconn_send_reply(const struct ofconn *ofconn, struct ofpbuf *msg) +{ + queue_tx(msg, ofconn, ofconn->reply_counter); +} + static void send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh, int error) { - struct ofpbuf *buf = make_ofp_error_msg(error, oh); + struct ofpbuf *buf = ofputil_encode_error_msg(error, oh); if (buf) { COVERAGE_INC(ofproto_error); - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); } } -static void -hton_ofp_phy_port(struct ofp_phy_port *opp) -{ - opp->port_no = htons(opp->port_no); - opp->config = htonl(opp->config); - opp->state = htonl(opp->state); - opp->curr = htonl(opp->curr); - opp->advertised = htonl(opp->advertised); - opp->supported = htonl(opp->supported); - opp->peer = htonl(opp->peer); -} - static int handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh) { - queue_tx(make_echo_reply(oh), ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, make_echo_reply(oh)); return 0; } static int handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofp_switch_features *osf; struct ofpbuf *buf; struct ofport *port; osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf); - osf->datapath_id = htonll(ofconn->ofproto->datapath_id); + osf->datapath_id = htonll(ofproto->datapath_id); osf->n_buffers = htonl(pktbuf_capacity()); osf->n_tables = 2; osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS | @@ -2564,31 +2736,32 @@ handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh) (1u << OFPAT_SET_TP_DST) | (1u << OFPAT_ENQUEUE)); - HMAP_FOR_EACH (port, hmap_node, &ofconn->ofproto->ports) { + HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) { hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp)); } - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); return 0; } static int handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh) { + struct ofproto *ofproto = 
ofconn_get_ofproto(ofconn); struct ofpbuf *buf; struct ofp_switch_config *osc; uint16_t flags; bool drop_frags; /* Figure out flags. */ - dpif_get_drop_frags(ofconn->ofproto->dpif, &drop_frags); + dpif_get_drop_frags(ofproto->dpif, &drop_frags); flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL; /* Send reply. */ osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf); osc->flags = htons(flags); - osc->miss_send_len = htons(ofconn->miss_send_len); - queue_tx(buf, ofconn, ofconn->reply_counter); + osc->miss_send_len = htons(ofconn_get_miss_send_len(ofconn)); + ofconn_send_reply(ofconn, buf); return 0; } @@ -2596,15 +2769,17 @@ handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh) static int handle_set_config(struct ofconn *ofconn, const struct ofp_switch_config *osc) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); uint16_t flags = ntohs(osc->flags); - if (ofconn->type == OFCONN_PRIMARY && ofconn->role != NX_ROLE_SLAVE) { + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY + && ofconn_get_role(ofconn) != NX_ROLE_SLAVE) { switch (flags & OFPC_FRAG_MASK) { case OFPC_FRAG_NORMAL: - dpif_set_drop_frags(ofconn->ofproto->dpif, false); + dpif_set_drop_frags(ofproto->dpif, false); break; case OFPC_FRAG_DROP: - dpif_set_drop_frags(ofconn->ofproto->dpif, true); + dpif_set_drop_frags(ofproto->dpif, true); break; default: VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")", @@ -2613,15 +2788,11 @@ handle_set_config(struct ofconn *ofconn, const struct ofp_switch_config *osc) } } - ofconn->miss_send_len = ntohs(osc->miss_send_len); + ofconn_set_miss_send_len(ofconn, ntohs(osc->miss_send_len)); return 0; } -/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a - * flow translation. 
*/ -#define MAX_RESUBMIT_RECURSION 8 - static void do_xlate_actions(const union ofp_action *in, size_t n_in, struct action_xlate_ctx *ctx); @@ -2643,7 +2814,7 @@ add_output_action(struct action_xlate_ctx *ctx, uint16_t port) */ } - nl_msg_put_u32(ctx->odp_actions, ODPAT_OUTPUT, port); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_OUTPUT, port); ctx->nf_output_iface = port; } @@ -2678,7 +2849,7 @@ xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port) ctx->recurse--; } } else { - struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1); + static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1); VLOG_ERR_RL(&recurse_rl, "NXAST_RESUBMIT recursed over %d times", MAX_RESUBMIT_RECURSION); @@ -2694,7 +2865,7 @@ flood_packets(struct ofproto *ofproto, uint16_t odp_in_port, uint32_t mask, HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) { uint16_t odp_port = ofport->odp_port; if (odp_port != odp_in_port && !(ofport->opp.config & mask)) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, odp_port); + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, odp_port); } } *nf_output_iface = NF_OUT_FLOOD; @@ -2734,7 +2905,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, &ctx->nf_output_iface, ctx->odp_actions); break; case OFPP_CONTROLLER: - nl_msg_put_u32(ctx->odp_actions, ODPAT_CONTROLLER, max_len); + nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_CONTROLLER, max_len); break; case OFPP_LOCAL: add_output_action(ctx, ODPP_LOCAL); @@ -2781,7 +2952,7 @@ static void add_pop_action(struct action_xlate_ctx *ctx) { if (ctx->odp_actions->size != ctx->last_pop_priority) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_POP_PRIORITY); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_POP_PRIORITY); ctx->last_pop_priority = ctx->odp_actions->size; } } @@ -2812,7 +2983,7 @@ xlate_enqueue_action(struct action_xlate_ctx *ctx, /* Add ODP actions. 
*/ remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); add_output_action(ctx, odp_port); add_pop_action(ctx); @@ -2840,7 +3011,7 @@ xlate_set_queue_action(struct action_xlate_ctx *ctx, } remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); } static void @@ -2848,24 +3019,37 @@ xlate_set_dl_tci(struct action_xlate_ctx *ctx) { ovs_be16 tci = ctx->flow.vlan_tci; if (!(tci & htons(VLAN_CFI))) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_STRIP_VLAN); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_STRIP_VLAN); } else { - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_DL_TCI, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_TCI, tci & ~htons(VLAN_CFI)); } } +struct xlate_reg_state { + ovs_be16 vlan_tci; + ovs_be64 tun_id; +}; + static void -xlate_reg_move_action(struct action_xlate_ctx *ctx, - const struct nx_action_reg_move *narm) +save_reg_state(const struct action_xlate_ctx *ctx, + struct xlate_reg_state *state) { - ovs_be16 old_tci = ctx->flow.vlan_tci; - - nxm_execute_reg_move(narm, &ctx->flow); + state->vlan_tci = ctx->flow.vlan_tci; + state->tun_id = ctx->flow.tun_id; +} - if (ctx->flow.vlan_tci != old_tci) { +static void +update_reg_state(struct action_xlate_ctx *ctx, + const struct xlate_reg_state *state) +{ + if (ctx->flow.vlan_tci != state->vlan_tci) { xlate_set_dl_tci(ctx); } + if (ctx->flow.tun_id != state->tun_id) { + nl_msg_put_be64(ctx->odp_actions, + ODP_ACTION_ATTR_SET_TUNNEL, ctx->flow.tun_id); + } } static void @@ -2875,7 +3059,10 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, const struct nx_action_resubmit *nar; const struct nx_action_set_tunnel *nast; const struct nx_action_set_queue *nasq; + const struct nx_action_multipath *nam; enum nx_action_subtype subtype = ntohs(nah->subtype); + struct xlate_reg_state state; + ovs_be64 tun_id; assert(nah->vendor == htonl(NX_VENDOR_ID)); switch (subtype) { @@ -2886,13 +3073,15 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, case NXAST_SET_TUNNEL: nast = (const struct nx_action_set_tunnel *) nah; - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_TUNNEL, nast->tun_id); - ctx->flow.tun_id = nast->tun_id; + tun_id = htonll(ntohl(nast->tun_id)); + nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); + ctx->flow.tun_id = tun_id; break; case NXAST_DROP_SPOOFED_ARP: if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_DROP_SPOOFED_ARP); + nl_msg_put_flag(ctx->odp_actions, + ODP_ACTION_ATTR_DROP_SPOOFED_ARP); } break; @@ -2906,17 +3095,34 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, break; case NXAST_REG_MOVE: - xlate_reg_move_action(ctx, (const struct nx_action_reg_move *) nah); + save_reg_state(ctx, &state); + nxm_execute_reg_move((const struct nx_action_reg_move *) nah, + &ctx->flow); + update_reg_state(ctx, &state); break; case NXAST_REG_LOAD: + save_reg_state(ctx, &state); nxm_execute_reg_load((const struct nx_action_reg_load *) nah, &ctx->flow); + update_reg_state(ctx, &state); + break; case NXAST_NOTE: /* Nothing to do. 
*/ break; + case NXAST_SET_TUNNEL64: + tun_id = ((const struct nx_action_set_tunnel64 *) nah)->tun_id; + nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); + ctx->flow.tun_id = tun_id; + break; + + case NXAST_MULTIPATH: + nam = (const struct nx_action_multipath *) nah; + multipath_execute(nam, &ctx->flow); + break; + /* If you add a new action here that modifies flow data, don't forget to * update the flow key in ctx->flow at the same time. */ @@ -2972,44 +3178,44 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, case OFPAT_SET_DL_SRC: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_SRC, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_SRC, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_DL_DST: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_DST, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_DST, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_NW_SRC: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_SRC, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_SRC, ia->nw_addr.nw_addr); ctx->flow.nw_src = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_DST: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_DST, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_DST, ia->nw_addr.nw_addr); ctx->flow.nw_dst = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_TOS: - nl_msg_put_u8(ctx->odp_actions, ODPAT_SET_NW_TOS, + nl_msg_put_u8(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_TOS, ia->nw_tos.nw_tos); ctx->flow.nw_tos = ia->nw_tos.nw_tos; break; case OFPAT_SET_TP_SRC: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_SRC, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_SRC, ia->tp_port.tp_port); ctx->flow.tp_src = ia->tp_port.tp_port; break; case OFPAT_SET_TP_DST: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_DST, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_DST, ia->tp_port.tp_port); ctx->flow.tp_dst = ia->tp_port.tp_port; break; @@ -3038,6 +3244,19 @@ action_xlate_ctx_init(struct action_xlate_ctx *ctx, ctx->flow = *flow; ctx->packet = packet; ctx->resubmit_hook = NULL; + ctx->check_special = true; +} + +static void +ofproto_process_cfm(struct ofproto *ofproto, const struct flow *flow, + const struct ofpbuf *packet) +{ + struct ofport *ofport; + + ofport = get_port(ofproto, flow->in_port); + if (ofport && ofport->cfm) { + cfm_process_heartbeat(ofport->cfm, packet); + } } static struct ofpbuf * @@ -3052,7 +3271,21 @@ xlate_actions(struct action_xlate_ctx *ctx, ctx->nf_output_iface = NF_OUT_DROP; ctx->recurse = 0; ctx->last_pop_priority = -1; - do_xlate_actions(in, n_in, ctx); + + if (ctx->check_special && cfm_should_process_flow(&ctx->flow)) { + if (ctx->packet) { + ofproto_process_cfm(ctx->ofproto, &ctx->flow, ctx->packet); + } + ctx->may_set_up_flow = false; + } else if (ctx->check_special + && ctx->ofproto->ofhooks->special_cb + && !ctx->ofproto->ofhooks->special_cb(&ctx->flow, ctx->packet, + ctx->ofproto->aux)) { + ctx->may_set_up_flow = false; + } else { + do_xlate_actions(in, n_in, ctx); + } + remove_pop_action(ctx); /* Check with in-band control to see if we're allowed to set up this @@ -3073,7 +3306,8 @@ xlate_actions(struct action_xlate_ctx *ctx, static int reject_slave_controller(struct ofconn *ofconn, const const char *msg_type) { - if (ofconn->type == OFCONN_PRIMARY && ofconn->role == 
NX_ROLE_SLAVE) { + if (ofconn_get_type(ofconn) == OFCONN_PRIMARY + && ofconn_get_role(ofconn) == NX_ROLE_SLAVE) { static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5); VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller", msg_type); @@ -3087,7 +3321,7 @@ reject_slave_controller(struct ofconn *ofconn, const const char *msg_type) static int handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofp_packet_out *opo; struct ofpbuf payload, *buffer; union ofp_action *ofp_actions; @@ -3177,7 +3411,7 @@ update_port_config(struct ofproto *p, struct ofport *port, static int handle_port_mod(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); const struct ofp_port_mod *opm = (const struct ofp_port_mod *) oh; struct ofport *port; int error; @@ -3232,7 +3466,7 @@ append_ofp_stats_reply(size_t nbytes, struct ofconn *ofconn, struct ofp_stats_reply *reply = msg->data; reply->flags = htons(OFPSF_REPLY_MORE); *msgp = make_ofp_stats_reply(reply->header.xid, reply->type, nbytes); - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); } return ofpbuf_put_uninit(*msgp, nbytes); } @@ -3248,7 +3482,7 @@ make_nxstats_reply(ovs_be32 xid, ovs_be32 subtype, size_t body_len) nsm->type = htons(OFPST_VENDOR); nsm->flags = htons(0); nsm->vendor = htonl(NX_VENDOR_ID); - nsm->subtype = htonl(subtype); + nsm->subtype = subtype; return msg; } @@ -3268,7 +3502,7 @@ append_nxstats_reply(size_t nbytes, struct ofconn *ofconn, struct nicira_stats_msg *reply = msg->data; reply->flags = htons(OFPSF_REPLY_MORE); *msgp = make_nxstats_reply(reply->header.xid, reply->subtype, nbytes); - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); } ofpbuf_prealloc_tailroom(*msgp, nbytes); } @@ -3277,7 +3511,7 @@ static int handle_desc_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofp_desc_stats *ods; struct ofpbuf *msg; @@ -3289,7 +3523,7 @@ handle_desc_stats_request(struct ofconn *ofconn, ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc); ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num); ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc); - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); return 0; } @@ -3298,7 +3532,7 @@ static int handle_table_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofp_table_stats *ots; struct ofpbuf *msg; @@ -3308,14 +3542,14 @@ handle_table_stats_request(struct ofconn *ofconn, ots = append_ofp_stats_reply(sizeof *ots, ofconn, &msg); memset(ots, 0, sizeof *ots); strcpy(ots->name, "classifier"); - ots->wildcards = (ofconn->flow_format == NXFF_OPENFLOW10 + ots->wildcards = (ofconn_get_flow_format(ofconn) == NXFF_OPENFLOW10 ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL)); ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. 
*/ ots->active_count = htonl(classifier_count(&p->cls)); - ots->lookup_count = htonll(0); /* XXX */ - ots->matched_count = htonll(0); /* XXX */ + put_32aligned_be64(&ots->lookup_count, htonll(0)); /* XXX */ + put_32aligned_be64(&ots->matched_count, htonll(0)); /* XXX */ - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); return 0; } @@ -3334,24 +3568,24 @@ append_port_stat(struct ofport *port, struct ofconn *ofconn, ops = append_ofp_stats_reply(sizeof *ops, ofconn, msgp); ops->port_no = htons(port->opp.port_no); memset(ops->pad, 0, sizeof ops->pad); - ops->rx_packets = htonll(stats.rx_packets); - ops->tx_packets = htonll(stats.tx_packets); - ops->rx_bytes = htonll(stats.rx_bytes); - ops->tx_bytes = htonll(stats.tx_bytes); - ops->rx_dropped = htonll(stats.rx_dropped); - ops->tx_dropped = htonll(stats.tx_dropped); - ops->rx_errors = htonll(stats.rx_errors); - ops->tx_errors = htonll(stats.tx_errors); - ops->rx_frame_err = htonll(stats.rx_frame_errors); - ops->rx_over_err = htonll(stats.rx_over_errors); - ops->rx_crc_err = htonll(stats.rx_crc_errors); - ops->collisions = htonll(stats.collisions); + put_32aligned_be64(&ops->rx_packets, htonll(stats.rx_packets)); + put_32aligned_be64(&ops->tx_packets, htonll(stats.tx_packets)); + put_32aligned_be64(&ops->rx_bytes, htonll(stats.rx_bytes)); + put_32aligned_be64(&ops->tx_bytes, htonll(stats.tx_bytes)); + put_32aligned_be64(&ops->rx_dropped, htonll(stats.rx_dropped)); + put_32aligned_be64(&ops->tx_dropped, htonll(stats.tx_dropped)); + put_32aligned_be64(&ops->rx_errors, htonll(stats.rx_errors)); + put_32aligned_be64(&ops->tx_errors, htonll(stats.tx_errors)); + put_32aligned_be64(&ops->rx_frame_err, htonll(stats.rx_frame_errors)); + put_32aligned_be64(&ops->rx_over_err, htonll(stats.rx_over_errors)); + put_32aligned_be64(&ops->rx_crc_err, htonll(stats.rx_crc_errors)); + put_32aligned_be64(&ops->collisions, htonll(stats.collisions)); } static int handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); const struct ofp_port_stats_request *psr = ofputil_stats_body(oh); struct ofp_port_stats *ops; struct ofpbuf *msg; @@ -3369,65 +3603,26 @@ handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) } } - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); return 0; } -/* Obtains statistic counters for 'rule' within 'p' and stores them into - * '*packet_countp' and '*byte_countp'. The returned statistics include - * statistics for all of 'rule''s facets. */ static void -query_stats(struct ofproto *p, struct rule *rule, - uint64_t *packet_countp, uint64_t *byte_countp) +calc_flow_duration__(long long int start, uint32_t *sec, uint32_t *nsec) { - uint64_t packet_count, byte_count; - struct facet *facet; - struct odp_flow *odp_flows; - size_t n_odp_flows; - - /* Start from historical data for 'rule' itself that are no longer tracked - * by the datapath. This counts, for example, facets that have expired. */ - packet_count = rule->packet_count; - byte_count = rule->byte_count; - - /* Prepare to ask the datapath for statistics on all of the rule's facets. - * - * Also, add any statistics that are not tracked by the datapath for each - * facet. This includes, for example, statistics for packets that were - * executed "by hand" by ofproto via dpif_execute() but must be accounted - * to a rule. 
*/ - odp_flows = xzalloc(list_size(&rule->facets) * sizeof *odp_flows); - n_odp_flows = 0; - LIST_FOR_EACH (facet, list_node, &rule->facets) { - struct odp_flow *odp_flow = &odp_flows[n_odp_flows++]; - odp_flow_key_from_flow(&odp_flow->key, &facet->flow); - packet_count += facet->packet_count; - byte_count += facet->byte_count; - } - - /* Fetch up-to-date statistics from the datapath and add them in. */ - if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) { - size_t i; - - for (i = 0; i < n_odp_flows; i++) { - struct odp_flow *odp_flow = &odp_flows[i]; - packet_count += odp_flow->stats.n_packets; - byte_count += odp_flow->stats.n_bytes; - } - } - free(odp_flows); - - /* Return the stats to the caller. */ - *packet_countp = packet_count; - *byte_countp = byte_count; + long long int msecs = time_msec() - start; + *sec = msecs / 1000; + *nsec = (msecs % 1000) * (1000 * 1000); } static void -calc_flow_duration(long long int start, ovs_be32 *sec, ovs_be32 *nsec) +calc_flow_duration(long long int start, ovs_be32 *sec_be, ovs_be32 *nsec_be) { - long long int msecs = time_msec() - start; - *sec = htonl(msecs / 1000); - *nsec = htonl((msecs % 1000) * (1000 * 1000)); + uint32_t sec, nsec; + + calc_flow_duration__(start, &sec, &nsec); + *sec_be = htonl(sec); + *nsec_be = htonl(nsec); } static void @@ -3436,6 +3631,7 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, { struct ofp_flow_stats *ofs; uint64_t packet_count, byte_count; + ovs_be64 cookie; size_t act_len, len; if (rule_is_hidden(rule) || !rule_has_out_port(rule, out_port)) { @@ -3445,21 +3641,22 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, act_len = sizeof *rule->actions * rule->n_actions; len = offsetof(struct ofp_flow_stats, actions) + act_len; - query_stats(ofconn->ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); ofs = append_ofp_stats_reply(len, ofconn, replyp); ofs->length = htons(len); ofs->table_id = 0; ofs->pad = 0; - ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofs->match); + ofputil_cls_rule_to_match(&rule->cr, ofconn_get_flow_format(ofconn), + &ofs->match, rule->flow_cookie, &cookie); + put_32aligned_be64(&ofs->cookie, cookie); calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec); - ofs->cookie = rule->flow_cookie; ofs->priority = htons(rule->cr.priority); ofs->idle_timeout = htons(rule->idle_timeout); ofs->hard_timeout = htons(rule->hard_timeout); memset(ofs->pad2, 0, sizeof ofs->pad2); - ofs->packet_count = htonll(packet_count); - ofs->byte_count = htonll(byte_count); + put_32aligned_be64(&ofs->packet_count, htonll(packet_count)); + put_32aligned_be64(&ofs->byte_count, htonll(byte_count)); if (rule->n_actions > 0) { memcpy(ofs->actions, rule->actions, act_len); } @@ -3468,13 +3665,23 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, static bool is_valid_table(uint8_t table_id) { - return table_id == 0 || table_id == 0xff; + if (table_id == 0 || table_id == 0xff) { + return true; + } else { + /* It would probably be better to reply with an error but there doesn't + * seem to be any appropriate value, so that might just be + * confusing. 
*/ + VLOG_WARN_RL(&rl, "controller asked for invalid table %"PRIu8, + table_id); + return false; + } } static int handle_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) { const struct ofp_flow_stats_request *fsr = ofputil_stats_body(oh); + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofpbuf *reply; COVERAGE_INC(ofproto_flows_req); @@ -3486,12 +3693,12 @@ handle_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) ofputil_cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0, &target); - cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target); + cls_cursor_init(&cursor, &ofproto->cls, &target); CLS_CURSOR_FOR_EACH (rule, cr, &cursor) { put_ofp_flow_stats(ofconn, rule, fsr->out_port, &reply); } } - queue_tx(reply, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, reply); return 0; } @@ -3509,12 +3716,12 @@ put_nx_flow_stats(struct ofconn *ofconn, struct rule *rule, return; } - query_stats(ofconn->ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); act_len = sizeof *rule->actions * rule->n_actions; - start_len = (*replyp)->size; append_nxstats_reply(sizeof *nfs + NXM_MAX_LEN + act_len, ofconn, replyp); + start_len = (*replyp)->size; reply = *replyp; nfs = ofpbuf_put_uninit(reply, sizeof *nfs); @@ -3538,6 +3745,7 @@ put_nx_flow_stats(struct ofconn *ofconn, struct rule *rule, static int handle_nxst_flow(struct ofconn *ofconn, const struct ofp_header *oh) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct nx_flow_stats_request *nfsr; struct cls_rule target; struct ofpbuf *reply; @@ -3562,32 +3770,32 @@ handle_nxst_flow(struct ofconn *ofconn, const struct ofp_header *oh) struct cls_cursor cursor; struct rule *rule; - cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target); + cls_cursor_init(&cursor, &ofproto->cls, &target); CLS_CURSOR_FOR_EACH (rule, cr, &cursor) { put_nx_flow_stats(ofconn, rule, nfsr->out_port, &reply); } } - queue_tx(reply, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, reply); return 0; } static void -flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results) +flow_stats_ds(struct rule *rule, struct ds *results) { - struct ofp_match match; uint64_t packet_count, byte_count; size_t act_len = sizeof *rule->actions * rule->n_actions; - query_stats(ofproto, rule, &packet_count, &byte_count); - ofputil_cls_rule_to_match(&rule->cr, NXFF_OPENFLOW10, &match); + rule_get_stats(rule, &packet_count, &byte_count); ds_put_format(results, "duration=%llds, ", (time_msec() - rule->created) / 1000); + ds_put_format(results, "idle=%.3fs, ", (time_msec() - rule->used) / 1000.0); ds_put_format(results, "priority=%u, ", rule->cr.priority); ds_put_format(results, "n_packets=%"PRIu64", ", packet_count); ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count); - ofp_print_match(results, &match, true); + cls_rule_format(&rule->cr, results); + ds_put_char(results, ','); if (act_len > 0) { ofp_print_actions(results, &rule->actions->header, act_len); } else { @@ -3597,7 +3805,7 @@ flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results) } /* Adds a pretty-printed description of all flows to 'results', including - * those marked hidden by secchan (e.g., by in-band control). */ + * hidden flows (e.g., set up by in-band control). 
*/ void ofproto_get_all_flows(struct ofproto *p, struct ds *results) { @@ -3606,7 +3814,7 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results) cls_cursor_init(&cursor, &p->cls, NULL); CLS_CURSOR_FOR_EACH (rule, cr, &cursor) { - flow_stats_ds(p, rule, results); + flow_stats_ds(rule, results); } } @@ -3631,7 +3839,7 @@ query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target, uint64_t packet_count; uint64_t byte_count; - query_stats(ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); total_packets += packet_count; total_bytes += byte_count; @@ -3641,8 +3849,8 @@ query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target, } oasr->flow_count = htonl(n_flows); - oasr->packet_count = htonll(total_packets); - oasr->byte_count = htonll(total_bytes); + put_32aligned_be64(&oasr->packet_count, htonll(total_packets)); + put_32aligned_be64(&oasr->byte_count, htonll(total_bytes)); memset(oasr->pad, 0, sizeof oasr->pad); } @@ -3651,6 +3859,7 @@ handle_aggregate_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) { const struct ofp_aggregate_stats_request *request = ofputil_stats_body(oh); + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofp_aggregate_stats_reply *reply; struct cls_rule target; struct ofpbuf *msg; @@ -3660,15 +3869,16 @@ handle_aggregate_stats_request(struct ofconn *ofconn, msg = start_ofp_stats_reply(oh, sizeof *reply); reply = append_ofp_stats_reply(sizeof *reply, ofconn, &msg); - query_aggregate_stats(ofconn->ofproto, &target, request->out_port, + query_aggregate_stats(ofproto, &target, request->out_port, request->table_id, reply); - queue_tx(msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, msg); return 0; } static int handle_nxst_aggregate(struct ofconn *ofconn, const struct ofp_header *oh) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct nx_aggregate_stats_request *request; struct ofp_aggregate_stats_reply *reply; struct cls_rule target; @@ -3692,9 +3902,9 @@ handle_nxst_aggregate(struct ofconn *ofconn, const struct ofp_header *oh) COVERAGE_INC(ofproto_flows_req); buf = start_nxstats_reply(&request->nsm, sizeof *reply); reply = ofpbuf_put_uninit(buf, sizeof *reply); - query_aggregate_stats(ofconn->ofproto, &target, request->out_port, + query_aggregate_stats(ofproto, &target, request->out_port, request->table_id, reply); - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); return 0; } @@ -3715,9 +3925,9 @@ put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id, reply->port_no = htons(cbdata->ofport->opp.port_no); memset(reply->pad, 0, sizeof reply->pad); reply->queue_id = htonl(queue_id); - reply->tx_bytes = htonll(stats->tx_bytes); - reply->tx_packets = htonll(stats->tx_packets); - reply->tx_errors = htonll(stats->tx_errors); + put_32aligned_be64(&reply->tx_bytes, htonll(stats->tx_bytes)); + put_32aligned_be64(&reply->tx_packets, htonll(stats->tx_packets)); + put_32aligned_be64(&reply->tx_errors, htonll(stats->tx_errors)); } static void @@ -3750,7 +3960,7 @@ handle_queue_stats_for_port(struct ofport *port, uint32_t queue_id, static int handle_queue_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *ofproto = ofconn->ofproto; + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); const struct ofp_queue_stats_request *qsr; struct queue_stats_cbdata cbdata; struct ofport *port; @@ -3782,22 +3992,17 @@ handle_queue_stats_request(struct ofconn *ofconn, const struct 
ofp_header *oh) ofpbuf_delete(cbdata.msg); return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT); } - queue_tx(cbdata.msg, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, cbdata.msg); return 0; } -static long long int -msec_from_nsec(uint64_t sec, uint32_t nsec) -{ - return !sec ? 0 : sec * 1000 + nsec / 1000000; -} - +/* Updates 'facet''s used time. Caller is responsible for calling + * facet_push_stats() to update the flows which 'facet' resubmits into. */ static void facet_update_time(struct ofproto *ofproto, struct facet *facet, - const struct odp_flow_stats *stats) + long long int used) { - long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec); if (used > facet->used) { facet->used = used; if (used > facet->rule->used) { @@ -3815,21 +4020,81 @@ facet_update_time(struct ofproto *ofproto, struct facet *facet, * cleared out of the datapath. */ static void facet_update_stats(struct ofproto *ofproto, struct facet *facet, - const struct odp_flow_stats *stats) + const struct dpif_flow_stats *stats) { - if (stats->n_packets) { - facet_update_time(ofproto, facet, stats); + if (stats->n_packets || stats->used > facet->used) { + facet_update_time(ofproto, facet, stats->used); facet->packet_count += stats->n_packets; facet->byte_count += stats->n_bytes; + facet_push_stats(ofproto, facet); netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags); } } +static void +facet_push_stats(struct ofproto *ofproto, struct facet *facet) +{ + uint64_t rs_packets, rs_bytes; + + assert(facet->packet_count >= facet->rs_packet_count); + assert(facet->byte_count >= facet->rs_byte_count); + assert(facet->used >= facet->rs_used); + + rs_packets = facet->packet_count - facet->rs_packet_count; + rs_bytes = facet->byte_count - facet->rs_byte_count; + + if (rs_packets || rs_bytes || facet->used > facet->rs_used) { + facet->rs_packet_count = facet->packet_count; + facet->rs_byte_count = facet->byte_count; + facet->rs_used = facet->used; + + flow_push_stats(ofproto, facet->rule, &facet->flow, + rs_packets, rs_bytes, facet->used); + } +} + +struct ofproto_push { + struct action_xlate_ctx ctx; + uint64_t packets; + uint64_t bytes; + long long int used; +}; + +static void +push_resubmit(struct action_xlate_ctx *ctx, struct rule *rule) +{ + struct ofproto_push *push = CONTAINER_OF(ctx, struct ofproto_push, ctx); + + if (rule) { + rule->packet_count += push->packets; + rule->byte_count += push->bytes; + rule->used = MAX(push->used, rule->used); + } +} + +/* Pushes flow statistics to the rules which 'flow' resubmits into given + * 'rule''s actions. */ +static void +flow_push_stats(struct ofproto *ofproto, const struct rule *rule, + struct flow *flow, uint64_t packets, uint64_t bytes, + long long int used) +{ + struct ofproto_push push; + + push.packets = packets; + push.bytes = bytes; + push.used = used; + + action_xlate_ctx_init(&push.ctx, ofproto, flow, NULL); + push.ctx.resubmit_hook = push_resubmit; + ofpbuf_delete(xlate_actions(&push.ctx, rule->actions, rule->n_actions)); +} + /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT * in which no matching flow already exists in the flow table. * * Adds the flow specified by 'ofm', which is followed by 'n_actions' - * ofp_actions, to ofconn->ofproto's flow table. Returns 0 on success or an + * ofp_actions, to the ofproto's flow table. Returns 0 on success or an * OpenFlow error code as encoded by ofp_mkerr() on failure. 
* * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id, @@ -3837,7 +4102,7 @@ facet_update_stats(struct ofproto *ofproto, struct facet *facet, static int add_flow(struct ofconn *ofconn, struct flow_mod *fm) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofpbuf *packet; struct rule *rule; uint16_t in_port; @@ -3877,6 +4142,7 @@ static int send_buffered_packet(struct ofconn *ofconn, struct rule *rule, uint32_t buffer_id) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofpbuf *packet; uint16_t in_port; int error; @@ -3890,7 +4156,7 @@ send_buffered_packet(struct ofconn *ofconn, return error; } - rule_execute(ofconn->ofproto, rule, in_port, packet); + rule_execute(ofproto, rule, in_port, packet); return 0; } @@ -3914,7 +4180,7 @@ static int modify_flow(struct ofproto *, const struct flow_mod *, static int modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct rule *match = NULL; struct cls_cursor cursor; struct rule *rule; @@ -3946,7 +4212,7 @@ modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm) static int modify_flow_strict(struct ofconn *ofconn, struct flow_mod *fm) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct rule *rule = find_flow_strict(p, fm); if (rule && !rule_is_hidden(rule)) { modify_flow(p, fm, rule); @@ -4037,7 +4303,7 @@ delete_flow(struct ofproto *p, struct rule *rule, ovs_be16 out_port) static int handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh) { - struct ofproto *p = ofconn->ofproto; + struct ofproto *p = ofconn_get_ofproto(ofconn); struct flow_mod fm; int error; @@ -4046,7 +4312,7 @@ handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh) return error; } - error = ofputil_decode_flow_mod(&fm, oh, ofconn->flow_format); + error = ofputil_decode_flow_mod(&fm, oh, ofconn_get_flow_format(ofconn)); if (error) { return error; } @@ -4093,8 +4359,11 @@ handle_tun_id_from_cookie(struct ofconn *ofconn, const struct ofp_header *oh) { const struct nxt_tun_id_cookie *msg = (const struct nxt_tun_id_cookie *) oh; + enum nx_flow_format flow_format; + + flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10; + ofconn_set_flow_format(ofconn, flow_format); - ofconn->flow_format = msg->set ? 
NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10; return 0; } @@ -4106,7 +4375,7 @@ handle_role_request(struct ofconn *ofconn, const struct ofp_header *oh) struct ofpbuf *buf; uint32_t role; - if (ofconn->type != OFCONN_PRIMARY) { + if (ofconn_get_type(ofconn) != OFCONN_PRIMARY) { VLOG_WARN_RL(&rl, "ignoring role request on non-controller " "connection"); return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM); @@ -4122,19 +4391,20 @@ handle_role_request(struct ofconn *ofconn, const struct ofp_header *oh) } if (role == NX_ROLE_MASTER) { + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofconn *other; - HMAP_FOR_EACH (other, hmap_node, &ofconn->ofproto->controllers) { - if (other->role == NX_ROLE_MASTER) { - other->role = NX_ROLE_SLAVE; + HMAP_FOR_EACH (other, hmap_node, &ofproto->controllers) { + if (ofconn_get_role(other) == NX_ROLE_MASTER) { + ofconn_set_role(other, NX_ROLE_SLAVE); } } } - ofconn->role = role; + ofconn_set_role(ofconn, role); reply = make_nxmsg_xid(sizeof *reply, NXT_ROLE_REPLY, oh->xid, &buf); reply->role = htonl(role); - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); return 0; } @@ -4150,7 +4420,7 @@ handle_nxt_set_flow_format(struct ofconn *ofconn, const struct ofp_header *oh) if (format == NXFF_OPENFLOW10 || format == NXFF_TUN_ID_FROM_COOKIE || format == NXFF_NXM) { - ofconn->flow_format = format; + ofconn_set_flow_format(ofconn, format); return 0; } else { return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM); @@ -4166,7 +4436,7 @@ handle_barrier_request(struct ofconn *ofconn, const struct ofp_header *oh) /* Currently, everything executes synchronously, so we can just * immediately send the barrier reply. */ ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf); - queue_tx(buf, ofconn, ofconn->reply_counter); + ofconn_send_reply(ofconn, buf); return 0; } @@ -4213,10 +4483,6 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) return 0; /* Nicira extension requests. */ - case OFPUTIL_NXT_STATUS_REQUEST: - return switch_status_handle_request( - ofconn->ofproto->switch_status, ofconn->rconn, oh); - case OFPUTIL_NXT_TUN_ID_FROM_COOKIE: return handle_tun_id_from_cookie(ofconn, oh); @@ -4272,7 +4538,6 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) case OFPUTIL_OFPST_PORT_REPLY: case OFPUTIL_OFPST_TABLE_REPLY: case OFPUTIL_OFPST_AGGREGATE_REPLY: - case OFPUTIL_NXT_STATUS_REPLY: case OFPUTIL_NXT_ROLE_REPLY: case OFPUTIL_NXT_FLOW_REMOVED: case OFPUTIL_NXST_FLOW_REPLY: @@ -4302,30 +4567,31 @@ handle_openflow(struct ofconn *ofconn, struct ofpbuf *ofp_msg) } static void -handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet) +handle_miss_upcall(struct ofproto *p, struct dpif_upcall *upcall) { - struct odp_msg *msg = packet->data; - struct ofpbuf payload; struct facet *facet; struct flow flow; - ofpbuf_use_const(&payload, msg + 1, msg->length - sizeof *msg); - flow_extract(&payload, msg->arg, msg->port, &flow); + /* Obtain in_port and tun_id, at least. */ + odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); - packet->l2 = payload.l2; - packet->l3 = payload.l3; - packet->l4 = payload.l4; - packet->l7 = payload.l7; + /* Set header pointers in 'flow'. 
*/ + flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow); + + if (cfm_should_process_flow(&flow)) { + ofproto_process_cfm(p, &flow, upcall->packet); + ofpbuf_delete(upcall->packet); + return; + } else if (p->ofhooks->special_cb + && !p->ofhooks->special_cb(&flow, upcall->packet, p->aux)) { + ofpbuf_delete(upcall->packet); + return; + } /* Check with in-band control to see if this packet should be sent * to the local port regardless of the flow table. */ - if (in_band_msg_in_hook(p->in_band, &flow, &payload)) { - struct ofpbuf odp_actions; - - ofpbuf_init(&odp_actions, 32); - nl_msg_put_u32(&odp_actions, ODPAT_OUTPUT, ODPP_LOCAL); - dpif_execute(p->dpif, odp_actions.data, odp_actions.size, &payload); - ofpbuf_uninit(&odp_actions); + if (in_band_msg_in_hook(p->in_band, &flow, upcall->packet)) { + ofproto_send_packet(p, ODPP_LOCAL, 0, upcall->packet); } facet = facet_lookup_valid(p, &flow); @@ -4333,29 +4599,29 @@ handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet) struct rule *rule = rule_lookup(p, &flow); if (!rule) { /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */ - struct ofport *port = get_port(p, msg->port); + struct ofport *port = get_port(p, flow.in_port); if (port) { if (port->opp.config & OFPPC_NO_PACKET_IN) { COVERAGE_INC(ofproto_no_packet_in); /* XXX install 'drop' flow entry */ - ofpbuf_delete(packet); + ofpbuf_delete(upcall->packet); return; } } else { VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, - msg->port); + flow.in_port); } COVERAGE_INC(ofproto_packet_in); - send_packet_in(p, packet); + send_packet_in(p, upcall, &flow, false); return; } - facet = facet_create(p, rule, &flow, packet); + facet = facet_create(p, rule, &flow, upcall->packet); } else if (!facet->may_install) { /* The facet is not installable, that is, we need to process every * packet, so process the current packet's actions into 'facet'. */ - facet_make_actions(p, facet, packet); + facet_make_actions(p, facet, upcall->packet); } if (facet->rule->cr.priority == FAIL_OPEN_PRIORITY) { @@ -4369,40 +4635,40 @@ handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet) * * See the top-level comment in fail-open.c for more information. 
*/ - send_packet_in(p, ofpbuf_clone_with_headroom(packet, - DPIF_RECV_MSG_PADDING)); + send_packet_in(p, upcall, &flow, true); } - ofpbuf_pull(packet, sizeof *msg); - facet_execute(p, facet, packet); + facet_execute(p, facet, upcall->packet); facet_install(p, facet, false); } static void -handle_odp_msg(struct ofproto *p, struct ofpbuf *packet) +handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) { - struct odp_msg *msg = packet->data; + struct flow flow; - switch (msg->type) { - case _ODPL_ACTION_NR: + switch (upcall->type) { + case DPIF_UC_ACTION: COVERAGE_INC(ofproto_ctlr_action); - send_packet_in(p, packet); + odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); + send_packet_in(p, upcall, &flow, false); break; - case _ODPL_SFLOW_NR: + case DPIF_UC_SAMPLE: if (p->sflow) { - ofproto_sflow_received(p->sflow, msg); + odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); + ofproto_sflow_received(p->sflow, upcall, &flow); } - ofpbuf_delete(packet); + ofpbuf_delete(upcall->packet); break; - case _ODPL_MISS_NR: - handle_odp_miss_msg(p, packet); + case DPIF_UC_MISS: + handle_miss_upcall(p, upcall); break; + case DPIF_N_UC_TYPES: default: - VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32, - msg->type); + VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type); break; } } @@ -4410,7 +4676,7 @@ handle_odp_msg(struct ofproto *p, struct ofpbuf *packet) /* Flow expiration. */ static int ofproto_dp_max_idle(const struct ofproto *); -static void ofproto_update_used(struct ofproto *); +static void ofproto_update_stats(struct ofproto *); static void rule_expire(struct ofproto *, struct rule *); static void ofproto_expire_facets(struct ofproto *, int dp_max_idle); @@ -4427,8 +4693,8 @@ ofproto_expire(struct ofproto *ofproto) struct cls_cursor cursor; int dp_max_idle; - /* Update 'used' for each flow in the datapath. */ - ofproto_update_used(ofproto); + /* Update stats for each flow in the datapath. */ + ofproto_update_stats(ofproto); /* Expire facets that have been idle too long. */ dp_max_idle = ofproto_dp_max_idle(ofproto); @@ -4451,40 +4717,71 @@ ofproto_expire(struct ofproto *ofproto) return MIN(dp_max_idle, 1000); } -/* Update 'used' member of installed facets. */ +/* Update 'packet_count', 'byte_count', and 'used' members of installed facets. + * + * This function also pushes statistics updates to rules which each facet + * resubmits into. Generally these statistics will be accurate. However, if a + * facet changes the rule it resubmits into at some time in between + * ofproto_update_stats() runs, it is possible that statistics accrued to the + * old rule will be incorrectly attributed to the new rule. This could be + * avoided by calling ofproto_update_stats() whenever rules are created or + * deleted. However, the performance impact of making so many calls to the + * datapath do not justify the benefit of having perfectly accurate statistics. 
+ */ static void -ofproto_update_used(struct ofproto *p) +ofproto_update_stats(struct ofproto *p) { - struct odp_flow *flows; - size_t n_flows; - size_t i; - int error; - - error = dpif_flow_list_all(p->dpif, &flows, &n_flows); - if (error) { - return; - } + const struct dpif_flow_stats *stats; + struct dpif_flow_dump dump; + const struct nlattr *key; + size_t key_len; - for (i = 0; i < n_flows; i++) { - struct odp_flow *f = &flows[i]; + dpif_flow_dump_start(&dump, p->dpif); + while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) { struct facet *facet; struct flow flow; - odp_flow_key_to_flow(&f->key, &flow); + if (odp_flow_key_to_flow(key, key_len, &flow)) { + struct ds s; + + ds_init(&s); + odp_flow_key_format(key, key_len, &s); + VLOG_WARN_RL(&rl, "failed to convert ODP flow key to flow: %s", + ds_cstr(&s)); + ds_destroy(&s); + + continue; + } facet = facet_find(p, &flow); if (facet && facet->installed) { - facet_update_time(p, facet, &f->stats); - facet_account(p, facet, f->stats.n_bytes); + + if (stats->n_packets >= facet->dp_packet_count) { + facet->packet_count += stats->n_packets - facet->dp_packet_count; + } else { + VLOG_WARN_RL(&rl, "unexpected packet count from the datapath"); + } + + if (stats->n_bytes >= facet->dp_byte_count) { + facet->byte_count += stats->n_bytes - facet->dp_byte_count; + } else { + VLOG_WARN_RL(&rl, "unexpected byte count from datapath"); + } + + facet->dp_packet_count = stats->n_packets; + facet->dp_byte_count = stats->n_bytes; + + facet_update_time(p, facet, stats->used); + facet_account(p, facet, stats->n_bytes); + facet_push_stats(p, facet); } else { /* There's a flow in the datapath that we know nothing about. * Delete it. */ COVERAGE_INC(ofproto_unexpected_rule); - dpif_flow_del(p->dpif, f); + dpif_flow_del(p->dpif, key, key_len, NULL); } - } - free(flows); + dpif_flow_dump_done(&dump); } /* Calculates and returns the number of milliseconds of idle time after which @@ -4519,7 +4816,7 @@ ofproto_dp_max_idle(const struct ofproto *ofproto) * they receive additional data). * * This requires a second pass through the facets, in addition to the pass - * made by ofproto_update_used(), because the former function never looks + * made by ofproto_update_stats(), because the former function never looks * at uninstallable facets. */ enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) }; @@ -4583,32 +4880,19 @@ facet_active_timeout(struct ofproto *ofproto, struct facet *facet) if (ofproto->netflow && !facet_is_controller_flow(facet) && netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) { struct ofexpired expired; - struct odp_flow odp_flow; - /* Get updated flow stats. - * - * XXX We could avoid this call entirely if (1) ofproto_update_used() - * updated TCP flags and (2) the dpif_flow_list_all() in - * ofproto_update_used() zeroed TCP flags. 
*/ - memset(&odp_flow, 0, sizeof odp_flow); if (facet->installed) { - odp_flow_key_from_flow(&odp_flow.key, &facet->flow); - odp_flow.flags = ODPFF_ZERO_TCP_FLAGS; - dpif_flow_get(ofproto->dpif, &odp_flow); - - if (odp_flow.stats.n_packets) { - facet_update_time(ofproto, facet, &odp_flow.stats); - netflow_flow_update_flags(&facet->nf_flow, - odp_flow.stats.tcp_flags); - } + struct dpif_flow_stats stats; + + facet_put__(ofproto, facet, facet->actions, facet->actions_len, + &stats); + facet_update_stats(ofproto, facet, &stats); } expired.flow = facet->flow; - expired.packet_count = facet->packet_count + - odp_flow.stats.n_packets; - expired.byte_count = facet->byte_count + odp_flow.stats.n_bytes; + expired.packet_count = facet->packet_count; + expired.byte_count = facet->byte_count; expired.used = facet->used; - netflow_expire(ofproto->netflow, &facet->nf_flow, &expired); } } @@ -4663,58 +4947,24 @@ rule_expire(struct ofproto *ofproto, struct rule *rule) rule_remove(ofproto, rule); } -static struct ofpbuf * -compose_ofp_flow_removed(struct ofconn *ofconn, const struct rule *rule, - uint8_t reason) -{ - struct ofp_flow_removed *ofr; - struct ofpbuf *buf; - - ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf); - ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofr->match); - ofr->cookie = rule->flow_cookie; - ofr->priority = htons(rule->cr.priority); - ofr->reason = reason; - calc_flow_duration(rule->created, &ofr->duration_sec, &ofr->duration_nsec); - ofr->idle_timeout = htons(rule->idle_timeout); - ofr->packet_count = htonll(rule->packet_count); - ofr->byte_count = htonll(rule->byte_count); - - return buf; -} - -static struct ofpbuf * -compose_nx_flow_removed(const struct rule *rule, uint8_t reason) -{ - struct nx_flow_removed *nfr; - struct ofpbuf *buf; - int match_len; - - nfr = make_nxmsg(sizeof *nfr, NXT_FLOW_REMOVED, &buf); - - match_len = nx_put_match(buf, &rule->cr); - - nfr->cookie = rule->flow_cookie; - nfr->priority = htons(rule->cr.priority); - nfr->reason = reason; - calc_flow_duration(rule->created, &nfr->duration_sec, &nfr->duration_nsec); - nfr->idle_timeout = htons(rule->idle_timeout); - nfr->match_len = htons(match_len); - nfr->packet_count = htonll(rule->packet_count); - nfr->byte_count = htonll(rule->byte_count); - - return buf; -} - static void rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason) { + struct ofputil_flow_removed fr; struct ofconn *ofconn; if (!rule->send_flow_removed) { return; } + fr.rule = rule->cr; + fr.cookie = rule->flow_cookie; + fr.reason = reason; + calc_flow_duration__(rule->created, &fr.duration_sec, &fr.duration_nsec); + fr.idle_timeout = rule->idle_timeout; + fr.packet_count = rule->packet_count; + fr.byte_count = rule->byte_count; + LIST_FOR_EACH (ofconn, node, &p->all_conns) { struct ofpbuf *msg; @@ -4723,163 +4973,124 @@ rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason) continue; } - msg = (ofconn->flow_format == NXFF_NXM - ? compose_nx_flow_removed(rule, reason) - : compose_ofp_flow_removed(ofconn, rule, reason)); + /* This accounts flow expirations as if they were replies to OpenFlow + * requests. That works because preventing OpenFlow requests from + * being processed also prevents new flows from being added (and + * expiring). (It also prevents processing OpenFlow requests that + * would not add new flows, so it is imperfect.) 
*/ + msg = ofputil_encode_flow_removed(&fr, ofconn_get_flow_format(ofconn)); + ofconn_send_reply(ofconn, msg); + } +} + +/* Obtains statistics for 'rule' and stores them in '*packets' and '*bytes'. + * The returned statistics include statistics for all of 'rule''s facets. */ +static void +rule_get_stats(const struct rule *rule, uint64_t *packets, uint64_t *bytes) +{ + uint64_t p, b; + struct facet *facet; + + /* Start from historical data for 'rule' itself that are no longer tracked + * in facets. This counts, for example, facets that have expired. */ + p = rule->packet_count; + b = rule->byte_count; - /* Account flow expirations under ofconn->reply_counter, the counter - * for replies to OpenFlow requests. That works because preventing - * OpenFlow requests from being processed also prevents new flows from - * being added (and expiring). (It also prevents processing OpenFlow - * requests that would not add new flows, so it is imperfect.) */ - queue_tx(msg, ofconn, ofconn->reply_counter); + /* Add any statistics that are tracked by facets. This includes + * statistical data recently updated by ofproto_update_stats() as well as + * stats for packets that were executed "by hand" via dpif_execute(). */ + LIST_FOR_EACH (facet, list_node, &rule->facets) { + p += facet->packet_count; + b += facet->byte_count; } + + *packets = p; + *bytes = b; } -/* pinsched callback for sending 'packet' on 'ofconn'. */ +/* pinsched callback for sending 'ofp_packet_in' on 'ofconn'. */ static void -do_send_packet_in(struct ofpbuf *packet, void *ofconn_) +do_send_packet_in(struct ofpbuf *ofp_packet_in, void *ofconn_) { struct ofconn *ofconn = ofconn_; - rconn_send_with_limit(ofconn->rconn, packet, + rconn_send_with_limit(ofconn->rconn, ofp_packet_in, ofconn->packet_in_counter, 100); } -/* Takes 'packet', which has been converted with do_convert_to_packet_in(), and - * finalizes its content for sending on 'ofconn', and passes it to 'ofconn''s - * packet scheduler for sending. - * - * 'max_len' specifies the maximum number of bytes of the packet to send on - * 'ofconn' (INT_MAX specifies no limit). +/* Takes 'upcall', whose packet has the flow specified by 'flow', composes an + * OpenFlow packet-in message from it, and passes it to 'ofconn''s packet + * scheduler for sending. * - * If 'clone' is true, the caller retains ownership of 'packet'. Otherwise, - * ownership is transferred to this function. */ + * If 'clone' is true, the caller retains ownership of 'upcall->packet'. + * Otherwise, ownership is transferred to this function. */ static void -schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, int max_len, - bool clone) -{ - struct ofproto *ofproto = ofconn->ofproto; - struct ofp_packet_in *opi = packet->data; - uint16_t in_port = ofp_port_to_odp_port(ntohs(opi->in_port)); - int send_len, trim_size; - uint32_t buffer_id; - - /* Get buffer. */ - if (opi->reason == OFPR_ACTION) { - buffer_id = UINT32_MAX; +schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, + const struct flow *flow, bool clone) +{ + struct ofproto *ofproto = ofconn_get_ofproto(ofconn); + struct ofputil_packet_in pin; + struct ofpbuf *msg; + + /* Figure out the easy parts. */ + pin.packet = upcall->packet; + pin.in_port = odp_port_to_ofp_port(flow->in_port); + pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION; + + /* Get OpenFlow buffer_id. 
*/ + if (upcall->type == DPIF_UC_ACTION) { + pin.buffer_id = UINT32_MAX; } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) { - buffer_id = pktbuf_get_null(); + pin.buffer_id = pktbuf_get_null(); } else if (!ofconn->pktbuf) { - buffer_id = UINT32_MAX; + pin.buffer_id = UINT32_MAX; } else { - struct ofpbuf payload; - - ofpbuf_use_const(&payload, opi->data, - packet->size - offsetof(struct ofp_packet_in, data)); - buffer_id = pktbuf_save(ofconn->pktbuf, &payload, in_port); + pin.buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet, + flow->in_port); } /* Figure out how much of the packet to send. */ - send_len = ntohs(opi->total_len); - if (buffer_id != UINT32_MAX) { - send_len = MIN(send_len, ofconn->miss_send_len); - } - send_len = MIN(send_len, max_len); - - /* Adjust packet length and clone if necessary. */ - trim_size = offsetof(struct ofp_packet_in, data) + send_len; - if (clone) { - packet = ofpbuf_clone_data(packet->data, trim_size); - opi = packet->data; - } else { - packet->size = trim_size; + pin.send_len = upcall->packet->size; + if (pin.buffer_id != UINT32_MAX) { + pin.send_len = MIN(pin.send_len, ofconn->miss_send_len); } - - /* Update packet headers. */ - opi->buffer_id = htonl(buffer_id); - update_openflow_length(packet); - - /* Hand over to packet scheduler. It might immediately call into - * do_send_packet_in() or it might buffer it for a while (until a later - * call to pinsched_run()). */ - pinsched_send(ofconn->schedulers[opi->reason], in_port, - packet, do_send_packet_in, ofconn); -} - -/* Replace struct odp_msg header in 'packet' by equivalent struct - * ofp_packet_in. The odp_msg must have sufficient headroom to do so (e.g. as - * returned by dpif_recv()). - * - * The conversion is not complete: the caller still needs to trim any unneeded - * payload off the end of the buffer, set the length in the OpenFlow header, - * and set buffer_id. Those require us to know the controller settings and so - * must be done on a per-controller basis. - * - * Returns the maximum number of bytes of the packet that should be sent to - * the controller (INT_MAX if no limit). */ -static int -do_convert_to_packet_in(struct ofpbuf *packet) -{ - struct odp_msg *msg = packet->data; - struct ofp_packet_in *opi; - uint8_t reason; - uint16_t total_len; - uint16_t in_port; - int max_len; - - /* Extract relevant header fields */ - if (msg->type == _ODPL_ACTION_NR) { - reason = OFPR_ACTION; - max_len = msg->arg; - } else { - reason = OFPR_NO_MATCH; - max_len = INT_MAX; + if (upcall->type == DPIF_UC_ACTION) { + pin.send_len = MIN(pin.send_len, upcall->userdata); } - total_len = msg->length - sizeof *msg; - in_port = odp_port_to_ofp_port(msg->port); - - /* Repurpose packet buffer by overwriting header. */ - ofpbuf_pull(packet, sizeof(struct odp_msg)); - opi = ofpbuf_push_zeros(packet, offsetof(struct ofp_packet_in, data)); - opi->header.version = OFP_VERSION; - opi->header.type = OFPT_PACKET_IN; - opi->total_len = htons(total_len); - opi->in_port = htons(in_port); - opi->reason = reason; - return max_len; + /* Make OFPT_PACKET_IN and hand over to packet scheduler. It might + * immediately call into do_send_packet_in() or it might buffer it for a + * while (until a later call to pinsched_run()). */ + msg = ofputil_encode_packet_in(&pin, clone ? NULL : upcall->packet); + pinsched_send(ofconn->schedulers[upcall->type == DPIF_UC_MISS ? 
0 : 1], + flow->in_port, msg, do_send_packet_in, ofconn); } -/* Given 'packet' containing an odp_msg of type _ODPL_ACTION_NR or - * _ODPL_MISS_NR, sends an OFPT_PACKET_IN message to each OpenFlow controller - * as necessary according to their individual configurations. - * - * 'packet' must have sufficient headroom to convert it into a struct - * ofp_packet_in (e.g. as returned by dpif_recv()). +/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an + * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to + * their individual configurations. * - * Takes ownership of 'packet'. */ + * If 'clone' is true, the caller retains ownership of 'upcall->packet'. + * Otherwise, ownership is transferred to this function. */ static void -send_packet_in(struct ofproto *ofproto, struct ofpbuf *packet) +send_packet_in(struct ofproto *ofproto, struct dpif_upcall *upcall, + const struct flow *flow, bool clone) { struct ofconn *ofconn, *prev; - int max_len; - - max_len = do_convert_to_packet_in(packet); prev = NULL; LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) { if (ofconn_receives_async_msgs(ofconn)) { if (prev) { - schedule_packet_in(prev, packet, max_len, true); + schedule_packet_in(prev, upcall, flow, true); } prev = ofconn; } } if (prev) { - schedule_packet_in(prev, packet, max_len, false); - } else { - ofpbuf_delete(packet); + schedule_packet_in(prev, upcall, flow, clone); + } else if (!clone) { + ofpbuf_delete(upcall->packet); } } @@ -4969,7 +5180,7 @@ trace_format_flow(struct ds *result, int level, const char *title, } static void -trace_resubmit(struct action_xlate_ctx *ctx, const struct rule *rule) +trace_resubmit(struct action_xlate_ctx *ctx, struct rule *rule) { struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx); struct ds *result = trace->result; @@ -4992,7 +5203,7 @@ ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_, struct ds result; struct flow flow; uint16_t in_port; - ovs_be32 tun_id; + ovs_be64 tun_id; char *s; ofpbuf_init(&packet, strlen(args) / 2); @@ -5014,7 +5225,7 @@ ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_, goto exit; } - tun_id = ntohl(strtoul(tun_id_s, NULL, 10)); + tun_id = htonll(strtoull(tun_id_s, NULL, 0)); in_port = ofp_port_to_odp_port(atoi(in_port_s)); packet_s = ofpbuf_put_hex(&packet, packet_s, NULL); @@ -5085,7 +5296,7 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, uint16_t *nf_output_iface, void *ofproto_) { struct ofproto *ofproto = ofproto_; - int out_port; + struct mac_entry *dst_mac; /* Drop frames for reserved multicast addresses. */ if (eth_addr_is_reserved(flow->dl_dst)) { @@ -5093,31 +5304,37 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, } /* Learn source MAC (but don't try to learn from revalidation). */ - if (packet != NULL) { - tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src, - 0, flow->in_port, - GRAT_ARP_LOCK_NONE); - if (rev_tag) { + if (packet != NULL + && mac_learning_may_learn(ofproto->ml, flow->dl_src, 0)) { + struct mac_entry *src_mac; + + src_mac = mac_learning_insert(ofproto->ml, flow->dl_src, 0); + if (mac_entry_is_new(src_mac) || src_mac->port.i != flow->in_port) { /* The log messages here could actually be useful in debugging, * so keep the rate limit relatively high. 
*/ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300); VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16, ETH_ADDR_ARGS(flow->dl_src), flow->in_port); - ofproto_revalidate(ofproto, rev_tag); + + ofproto_revalidate(ofproto, + mac_learning_changed(ofproto->ml, src_mac)); + src_mac->port.i = flow->in_port; } } /* Determine output port. */ - out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags, - NULL); - if (out_port < 0) { + dst_mac = mac_learning_lookup(ofproto->ml, flow->dl_dst, 0, tags); + if (!dst_mac) { flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD, nf_output_iface, odp_actions); - } else if (out_port != flow->in_port) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, out_port); - *nf_output_iface = out_port; } else { - /* Drop. */ + int out_port = dst_mac->port.i; + if (out_port != flow->in_port) { + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, out_port); + *nf_output_iface = out_port; + } else { + /* Drop. */ + } } return true; @@ -5126,5 +5343,6 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, static const struct ofhooks default_ofhooks = { default_normal_ofhook_cb, NULL, + NULL, NULL };
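----

A note on the statistics-pushing change introduced above: facet_update_stats()/facet_push_stats() now credit each facet's traffic to the rule(s) its flow resubmits into, and they do so by pushing only the delta accumulated since the previous push (tracked in the rs_* fields), so no rule is ever credited twice for the same packets. The following is a minimal, self-contained sketch of that delta-accounting idea only; toy_rule, toy_facet, and toy_push_stats are hypothetical stand-ins, not the real ofproto structures or the actual resubmit walk performed by flow_push_stats().

/* Sketch of the delta-based stats pushing used by facet_push_stats() above.
 * All names here are simplified stand-ins for illustration. */
#include <stdint.h>
#include <stdio.h>

struct toy_rule {
    uint64_t packet_count;
    uint64_t byte_count;
    long long int used;                      /* Last use time, in ms. */
};

struct toy_facet {
    uint64_t packet_count, byte_count;       /* Totals observed so far. */
    uint64_t rs_packet_count, rs_byte_count; /* Totals already pushed. */
    long long int used, rs_used;
    struct toy_rule *rule;                   /* Rule this facet resubmits into. */
};

/* Push only the statistics accumulated since the previous push, so the rule
 * is never credited twice for the same packets or bytes. */
static void
toy_push_stats(struct toy_facet *facet)
{
    uint64_t packets = facet->packet_count - facet->rs_packet_count;
    uint64_t bytes = facet->byte_count - facet->rs_byte_count;

    if (packets || bytes || facet->used > facet->rs_used) {
        facet->rs_packet_count = facet->packet_count;
        facet->rs_byte_count = facet->byte_count;
        facet->rs_used = facet->used;

        facet->rule->packet_count += packets;
        facet->rule->byte_count += bytes;
        if (facet->used > facet->rule->used) {
            facet->rule->used = facet->used;
        }
    }
}

int
main(void)
{
    struct toy_rule rule = { 0, 0, 0 };
    struct toy_facet facet = { 10, 1000, 0, 0, 5, 0, &rule };

    toy_push_stats(&facet);      /* Credits 10 packets, 1000 bytes. */
    facet.packet_count += 3;
    facet.byte_count += 300;
    toy_push_stats(&facet);      /* Credits only the new 3 packets, 300 bytes. */

    printf("rule: %llu packets, %llu bytes\n",
           (unsigned long long) rule.packet_count,
           (unsigned long long) rule.byte_count);
    return 0;
}

In the real code the push is driven by re-translating the rule's actions with a resubmit hook (push_resubmit), which is why a facet that changes which rule it resubmits into between ofproto_update_stats() runs can briefly misattribute statistics, as the comment above acknowledges.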