/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 * Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/socket.h>
#include <netinet/in.h>

#include "classifier.h"
#include "discovery.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "mac-learning.h"
#include "ofp-print.h"
#include "ofproto-sflow.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openvswitch/datapath-protocol.h"
#include "poll-loop.h"
#include "port-array.h"
#include "stream-ssl.h"

#include <linux/types.h>        /* XXX */
#include <linux/pkt_sched.h>    /* XXX */

#define THIS_MODULE VLM_ofproto
#include "vlog.h"

#include "sflow_api.h"
    TABLEID_CLASSIFIER = 1
};

struct ofport {
    struct netdev *netdev;
    struct ofp_phy_port opp;    /* In host byte order. */
};

static void ofport_free(struct ofport *);
static void hton_ofp_phy_port(struct ofp_phy_port *);

static int xlate_actions(const union ofp_action *in, size_t n_in,
                         const flow_t *flow, struct ofproto *ofproto,
                         const struct ofpbuf *packet,
                         struct odp_actions *out, tag_type *tags,
                         bool *may_set_up_flow, uint16_t *nf_output_iface);
    uint64_t flow_cookie;        /* Controller-issued identifier.
                                    (Kept in network-byte order.) */
    uint16_t idle_timeout;       /* In seconds from time of last use. */
    uint16_t hard_timeout;       /* In seconds from time of creation. */
    bool send_flow_removed;      /* Send a flow removed message? */
    long long int used;          /* Last-used time (0 if never used). */
    long long int created;       /* Creation time. */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */
    uint64_t accounted_bytes;    /* Number of bytes passed to account_cb. */
    tag_type tags;               /* Tags (set only by hooks). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */

    /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
     * exact-match rule (having cr.wc.wildcards of 0) generated from the
     * wildcard rule 'super'.  In this case, 'list' is an element of the
     * super-rule's 'list' of subrules.
     *
     * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
     * a list of subrules.  A super-rule with no wildcards (where
     * cr.wc.wildcards is 0) will never have any subrules. */
    struct rule *super;
    struct list list;

    /* OpenFlow actions.
     *
     * 'n_actions' is the number of elements in the 'actions' array.  A single
     * action may take up more than one element's worth of space.
     *
     * A subrule has no actions (it uses the super-rule's actions). */
    union ofp_action *actions;
    int n_actions;

    /* Datapath actions.
     *
     * A super-rule with wildcard fields never has ODP actions (since the
     * datapath only supports exact-match flows). */
    bool installed;              /* Installed in datapath? */
    bool may_install;            /* True ordinarily; false if actions must
                                  * be reassessed for every packet. */
    int n_odp_actions;
    union odp_action *odp_actions;
};
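
/* Added sketch (not in the original source): the super/subrule linkage above
 * can be walked with the list macros used throughout this file, e.g.:
 *
 *     struct rule *subrule;
 *     LIST_FOR_EACH (subrule, struct rule, list, &super->list) {
 *         ... each 'subrule' is exact-match and points back via 'super' ...
 *     }
 *
 * rule_destroy() below performs this traversal with the _SAFE variant so that
 * subrules can be deleted while iterating. */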

static bool
rule_is_hidden(const struct rule *rule)
{
    /* Subrules are merely an implementation detail, so hide them from the
     * controller. */
    if (rule->super != NULL) {
        return true;
    }

    /* Rules with priority higher than UINT16_MAX are set up by ofproto itself
     * (e.g. by in-band control) and are intentionally hidden from the
     * controller. */
    if (rule->cr.priority > UINT16_MAX) {
        return true;
    }

    return false;
}

static struct rule *rule_create(struct ofproto *, struct rule *super,
                                const union ofp_action *, size_t n_actions,
                                uint16_t idle_timeout, uint16_t hard_timeout,
                                uint64_t flow_cookie, bool send_flow_removed);
static void rule_free(struct rule *);
static void rule_destroy(struct ofproto *, struct rule *);
static struct rule *rule_from_cls_rule(const struct cls_rule *);
static void rule_insert(struct ofproto *, struct rule *,
                        struct ofpbuf *packet, uint16_t in_port);
static void rule_remove(struct ofproto *, struct rule *);
static bool rule_make_actions(struct ofproto *, struct rule *,
                              const struct ofpbuf *packet);
static void rule_install(struct ofproto *, struct rule *,
                         struct rule *displaced_rule);
static void rule_uninstall(struct ofproto *, struct rule *);
static void rule_post_uninstall(struct ofproto *, struct rule *);
static void send_flow_removed(struct ofproto *p, struct rule *rule,
                              long long int now, uint8_t reason);

/* ofproto supports two kinds of OpenFlow connections:
 *
 *   - "Controller connections": Connections to ordinary OpenFlow controllers.
 *     ofproto maintains persistent connections to these controllers and by
 *     default sends them asynchronous messages such as packet-ins.
 *
 *   - "Transient connections", e.g. from ovs-ofctl.  When these connections
 *     drop, it is the other side's responsibility to reconnect them if
 *     necessary.  ofproto does not send them asynchronous messages by
 *     default. */
enum ofconn_type {
    OFCONN_CONTROLLER,          /* An OpenFlow controller. */
    OFCONN_TRANSIENT            /* A transient connection. */
};
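
/* For example (grounded in the code below): add_controller() creates
 * OFCONN_CONTROLLER ofconns for configured controllers, while connections
 * accepted on a "listener" pvconn in ofproto_run1() are wrapped in
 * OFCONN_TRANSIENT ofconns. */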

/* An OpenFlow connection. */
struct ofconn {
    struct ofproto *ofproto;    /* The ofproto that owns this connection. */
    struct list node;           /* In struct ofproto's "all_conns" list. */
    struct rconn *rconn;        /* OpenFlow connection. */
    enum ofconn_type type;      /* Type. */

    /* OFPT_PACKET_IN related data. */
    struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
    struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */
    struct pktbuf *pktbuf;      /* OpenFlow packet buffers. */
    int miss_send_len;          /* Bytes to send of buffered packets. */

    /* Number of OpenFlow messages queued on 'rconn' as replies to OpenFlow
     * requests, and the maximum number before we stop reading OpenFlow
     * requests. */
#define OFCONN_REPLY_MAX 100
    struct rconn_packet_counter *reply_counter;

    /* type == OFCONN_CONTROLLER only. */
    enum nx_role role;           /* Role. */
    struct hmap_node hmap_node;  /* In struct ofproto's "controllers" map. */
    struct discovery *discovery; /* Controller discovery object, if enabled. */
    struct status_category *ss;  /* Switch status category. */
    enum ofproto_band band;      /* In-band or out-of-band? */
};

/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's
 * "schedulers" array.  Their values are 0 and 1, and their meanings and values
 * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient.  In
 * case anything ever changes, check their values here. */
#define N_SCHEDULERS 2
BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0);
BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR);
BUILD_ASSERT_DECL(OFPR_ACTION == 1);
BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR);

static struct ofconn *ofconn_create(struct ofproto *, struct rconn *,
                                    enum ofconn_type);
static void ofconn_destroy(struct ofconn *);
static void ofconn_run(struct ofconn *, struct ofproto *);
static void ofconn_wait(struct ofconn *);
static bool ofconn_receives_async_msgs(const struct ofconn *);
static char *ofconn_make_name(const struct ofproto *, const char *target);

static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
                     struct rconn_packet_counter *counter);

static void send_packet_in(struct ofproto *, struct ofpbuf *odp_msg);
static void do_send_packet_in(struct ofpbuf *odp_msg, void *ofconn);

struct ofproto {
    /* Settings. */
    uint64_t datapath_id;       /* Datapath ID. */
    uint64_t fallback_dpid;     /* Datapath ID if no better choice found. */
    char *mfr_desc;             /* Manufacturer. */
    char *hw_desc;              /* Hardware. */
    char *sw_desc;              /* Software version. */
    char *serial_desc;          /* Serial number. */
    char *dp_desc;              /* Datapath description. */

    /* Datapath. */
    struct dpif *dpif;
    struct netdev_monitor *netdev_monitor;
    struct port_array ports;    /* Index is ODP port nr; ofport->opp.port_no is
                                 * the OpenFlow port nr. */
    struct shash port_by_name;
    int max_ports;

    /* Configuration. */
    struct switch_status *switch_status;
    struct fail_open *fail_open;
    struct netflow *netflow;
    struct ofproto_sflow *sflow;

    /* In-band control. */
    struct in_band *in_band;
    long long int next_in_band_update;
    struct sockaddr_in *extra_in_band_remotes;
    size_t n_extra_remotes;

    /* Flow table. */
    struct classifier cls;
    bool need_revalidate;
    long long int next_expiration;
    struct tag_set revalidate_set;
    bool tun_id_from_cookie;

    /* OpenFlow connections. */
    struct hmap controllers;    /* Controller "struct ofconn"s. */
    struct list all_conns;      /* Contains "struct ofconn"s. */
    struct pvconn **listeners;
    size_t n_listeners;
    struct pvconn **snoops;
    size_t n_snoops;

    /* Hooks for ovs-vswitchd. */
    const struct ofhooks *ofhooks;
    void *aux;

    /* Used by default ofhooks. */
    struct mac_learning *ml;
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static const struct ofhooks default_ofhooks;

static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);

static void update_used(struct ofproto *);
static void update_stats(struct ofproto *, struct rule *,
                         const struct odp_flow_stats *);
static void expire_rule(struct cls_rule *, void *ofproto);
static void active_timeout(struct ofproto *ofproto, struct rule *rule);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);

static void handle_odp_msg(struct ofproto *, struct ofpbuf *);

static void handle_openflow(struct ofconn *, struct ofproto *,
                            struct ofpbuf *);

static void refresh_port_groups(struct ofproto *);

static void update_port(struct ofproto *, const char *devname);
static int init_ports(struct ofproto *);
static void reinit_ports(struct ofproto *);

int
ofproto_create(const char *datapath, const char *datapath_type,
               const struct ofhooks *ofhooks, void *aux,
               struct ofproto **ofprotop)
{
    struct odp_stats stats;
    struct ofproto *p;
    struct dpif *dpif;
    int error;

    /* Connect to datapath and start listening for messages. */
    error = dpif_open(datapath, datapath_type, &dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
        return error;
    }
    error = dpif_get_dp_stats(dpif, &stats);
    if (error) {
        VLOG_ERR("failed to obtain stats for datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    dpif_flow_flush(dpif);
    dpif_recv_purge(dpif);

    /* Initialize settings. */
    p = xzalloc(sizeof *p);
    p->fallback_dpid = pick_fallback_dpid();
    p->datapath_id = p->fallback_dpid;
    p->mfr_desc = xstrdup(DEFAULT_MFR_DESC);
    p->hw_desc = xstrdup(DEFAULT_HW_DESC);
    p->sw_desc = xstrdup(DEFAULT_SW_DESC);
    p->serial_desc = xstrdup(DEFAULT_SERIAL_DESC);
    p->dp_desc = xstrdup(DEFAULT_DP_DESC);

    /* Initialize datapath. */
    p->dpif = dpif;
    p->netdev_monitor = netdev_monitor_create();
    port_array_init(&p->ports);
    shash_init(&p->port_by_name);
    p->max_ports = stats.max_ports;

    /* Initialize submodules. */
    p->switch_status = switch_status_create(p);

    /* Initialize flow table. */
    classifier_init(&p->cls);
    p->need_revalidate = false;
    p->next_expiration = time_msec() + 1000;
    tag_set_init(&p->revalidate_set);

    /* Initialize OpenFlow connections. */
    list_init(&p->all_conns);
    hmap_init(&p->controllers);

    /* Initialize hooks. */
    if (ofhooks) {
        p->ofhooks = ofhooks;
        p->aux = aux;
        p->ml = NULL;
    } else {
        p->ofhooks = &default_ofhooks;
        p->aux = p;
        p->ml = mac_learning_create();
    }

    /* Pick final datapath ID. */
    p->datapath_id = pick_datapath_id(p);
    VLOG_INFO("using datapath ID %016"PRIx64, p->datapath_id);

    *ofprotop = p;
    return 0;
}

void
ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
{
    uint64_t old_dpid = p->datapath_id;
    p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
    if (p->datapath_id != old_dpid) {
        VLOG_INFO("datapath ID changed to %016"PRIx64, p->datapath_id);

        /* Force all active connections to reconnect, since there is no way to
         * notify a controller that the datapath ID has changed. */
        ofproto_reconnect_controllers(p);
    }
}

static bool
is_discovery_controller(const struct ofproto_controller *c)
{
    return !strcmp(c->target, "discover");
}

static bool
is_in_band_controller(const struct ofproto_controller *c)
{
    return is_discovery_controller(c) || c->band == OFPROTO_IN_BAND;
}

/* Creates a new controller in 'ofproto'.  Some of the settings are initially
 * drawn from 'c', but update_controller() needs to be called later to finish
 * the new ofconn's configuration. */
static void
add_controller(struct ofproto *ofproto, const struct ofproto_controller *c)
{
    struct discovery *discovery;
    struct ofconn *ofconn;

    if (is_discovery_controller(c)) {
        int error = discovery_create(c->accept_re, c->update_resolv_conf,
                                     ofproto->dpif, ofproto->switch_status,
                                     &discovery);
        if (error) {
            return;
        }
    } else {
        discovery = NULL;
    }

    ofconn = ofconn_create(ofproto, rconn_create(5, 8), OFCONN_CONTROLLER);
    ofconn->pktbuf = pktbuf_create();
    ofconn->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
    if (discovery) {
        ofconn->discovery = discovery;
    } else {
        char *name = ofconn_make_name(ofproto, c->target);
        rconn_connect(ofconn->rconn, c->target, name);
        free(name);
    }

    hmap_insert(&ofproto->controllers, &ofconn->hmap_node,
                hash_string(c->target, 0));
}

/* Reconfigures 'ofconn' to match 'c'.  This function cannot update an
 * ofconn's target or turn discovery on or off (these are done by creating new
 * ofconns and deleting old ones), but it can update the rest of an ofconn's
 * settings. */
static void
update_controller(struct ofconn *ofconn, const struct ofproto_controller *c)
{
    struct ofproto *ofproto = ofconn->ofproto;
    int probe_interval;
    int i;

    ofconn->band = (is_in_band_controller(c)
                    ? OFPROTO_IN_BAND : OFPROTO_OUT_OF_BAND);

    rconn_set_max_backoff(ofconn->rconn, c->max_backoff);

    probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0;
    rconn_set_probe_interval(ofconn->rconn, probe_interval);

    if (ofconn->discovery) {
        discovery_set_update_resolv_conf(ofconn->discovery,
                                         c->update_resolv_conf);
        discovery_set_accept_controller_re(ofconn->discovery, c->accept_re);
    }

    for (i = 0; i < N_SCHEDULERS; i++) {
        struct pinsched **s = &ofconn->schedulers[i];

        if (c->rate_limit > 0) {
            if (!*s) {
                *s = pinsched_create(c->rate_limit, c->burst_limit,
                                     ofproto->switch_status);
            } else {
                pinsched_set_limits(*s, c->rate_limit, c->burst_limit);
            }
        } else {
            pinsched_destroy(*s);
            *s = NULL;
        }
    }
}

static const char *
ofconn_get_target(const struct ofconn *ofconn)
{
    return ofconn->discovery ? "discover" : rconn_get_target(ofconn->rconn);
}

static struct ofconn *
find_controller_by_target(struct ofproto *ofproto, const char *target)
{
    struct ofconn *ofconn;

    HMAP_FOR_EACH_WITH_HASH (ofconn, struct ofconn, hmap_node,
                             hash_string(target, 0), &ofproto->controllers) {
        if (!strcmp(ofconn_get_target(ofconn), target)) {
            return ofconn;
        }
    }
    return NULL;
}

static void
update_in_band_remotes(struct ofproto *ofproto)
{
    const struct ofconn *ofconn;
    struct sockaddr_in *addrs;
    size_t max_addrs, n_addrs;
    bool discovery;
    size_t i;

    /* Allocate enough memory for as many remotes as we could possibly have. */
    max_addrs = ofproto->n_extra_remotes + hmap_count(&ofproto->controllers);
    addrs = xmalloc(max_addrs * sizeof *addrs);
    n_addrs = 0;

    /* Add all the remotes. */
    discovery = false;
    HMAP_FOR_EACH (ofconn, struct ofconn, hmap_node, &ofproto->controllers) {
        struct sockaddr_in *sin = &addrs[n_addrs];

        if (ofconn->band == OFPROTO_OUT_OF_BAND) {
            continue;
        }

        sin->sin_addr.s_addr = rconn_get_remote_ip(ofconn->rconn);
        if (sin->sin_addr.s_addr) {
            sin->sin_port = rconn_get_remote_port(ofconn->rconn);
            n_addrs++;
        }
        if (ofconn->discovery) {
            discovery = true;
        }
    }
    for (i = 0; i < ofproto->n_extra_remotes; i++) {
        addrs[n_addrs++] = ofproto->extra_in_band_remotes[i];
    }

    /* Create or update or destroy in-band.
     *
     * Ordinarily we only enable in-band if there's at least one remote
     * address, but discovery needs the in-band rules for DHCP to be installed
     * even before we know any remote addresses. */
    if (n_addrs || discovery) {
        if (!ofproto->in_band) {
            in_band_create(ofproto, ofproto->dpif, ofproto->switch_status,
                           &ofproto->in_band);
        }
        if (ofproto->in_band) {
            in_band_set_remotes(ofproto->in_band, addrs, n_addrs);
        }
        ofproto->next_in_band_update = time_msec() + 1000;
    } else {
        in_band_destroy(ofproto->in_band);
        ofproto->in_band = NULL;
    }

    free(addrs);
}

void
ofproto_set_controllers(struct ofproto *p,
                        const struct ofproto_controller *controllers,
                        size_t n_controllers)
{
    struct shash new_controllers;
    enum ofproto_fail_mode fail_mode;
    struct ofconn *ofconn, *next;
    bool ss_exists;
    size_t i;

    shash_init(&new_controllers);
    for (i = 0; i < n_controllers; i++) {
        const struct ofproto_controller *c = &controllers[i];

        shash_add_once(&new_controllers, c->target, &controllers[i]);
        if (!find_controller_by_target(p, c->target)) {
            add_controller(p, c);
        }
    }

    fail_mode = OFPROTO_FAIL_STANDALONE;
    ss_exists = false;
    HMAP_FOR_EACH_SAFE (ofconn, next, struct ofconn, hmap_node,
                        &p->controllers) {
        struct ofproto_controller *c;

        c = shash_find_data(&new_controllers, ofconn_get_target(ofconn));
        if (!c) {
            ofconn_destroy(ofconn);
        } else {
            update_controller(ofconn, c);
            if (ofconn->ss) {
                ss_exists = true;
            }
            if (c->fail == OFPROTO_FAIL_SECURE) {
                fail_mode = OFPROTO_FAIL_SECURE;
            }
        }
    }
    shash_destroy(&new_controllers);

    update_in_band_remotes(p);

    if (!hmap_is_empty(&p->controllers)
        && fail_mode == OFPROTO_FAIL_STANDALONE) {
        struct rconn **rconns;
        size_t n;

        if (!p->fail_open) {
            p->fail_open = fail_open_create(p, p->switch_status);
        }

        n = 0;
        rconns = xmalloc(hmap_count(&p->controllers) * sizeof *rconns);
        HMAP_FOR_EACH (ofconn, struct ofconn, hmap_node, &p->controllers) {
            rconns[n++] = ofconn->rconn;
        }

        fail_open_set_controllers(p->fail_open, rconns, n);
        /* p->fail_open takes ownership of 'rconns'. */
    } else {
        fail_open_destroy(p->fail_open);
        p->fail_open = NULL;
    }

    if (!hmap_is_empty(&p->controllers) && !ss_exists) {
        ofconn = CONTAINER_OF(hmap_first(&p->controllers),
                              struct ofconn, hmap_node);
        ofconn->ss = switch_status_register(p->switch_status, "remote",
                                            rconn_status_cb, ofconn->rconn);
    }
}

/* Drops the connections between 'ofproto' and all of its controllers, forcing
 * them to reconnect. */
void
ofproto_reconnect_controllers(struct ofproto *ofproto)
{
    struct ofconn *ofconn;

    LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
        rconn_reconnect(ofconn->rconn);
    }
}

static bool
any_extras_changed(const struct ofproto *ofproto,
                   const struct sockaddr_in *extras, size_t n)
{
    size_t i;

    if (n != ofproto->n_extra_remotes) {
        return true;
    }

    for (i = 0; i < n; i++) {
        const struct sockaddr_in *old = &ofproto->extra_in_band_remotes[i];
        const struct sockaddr_in *new = &extras[i];

        if (old->sin_addr.s_addr != new->sin_addr.s_addr ||
            old->sin_port != new->sin_port) {
            return true;
        }
    }

    return false;
}

/* Sets the 'n' TCP port addresses in 'extras' as ones to which 'ofproto''s
 * in-band control should guarantee access, in the same way that in-band
 * control guarantees access to OpenFlow controllers. */
void
ofproto_set_extra_in_band_remotes(struct ofproto *ofproto,
                                  const struct sockaddr_in *extras, size_t n)
{
    if (!any_extras_changed(ofproto, extras, n)) {
        return;
    }

    free(ofproto->extra_in_band_remotes);
    ofproto->n_extra_remotes = n;
    ofproto->extra_in_band_remotes = xmemdup(extras, n * sizeof *extras);

    update_in_band_remotes(ofproto);
}

void
ofproto_set_desc(struct ofproto *p,
                 const char *mfr_desc, const char *hw_desc,
                 const char *sw_desc, const char *serial_desc,
                 const char *dp_desc)
{
    struct ofp_desc_stats *ods;

    if (mfr_desc) {
        if (strlen(mfr_desc) >= sizeof ods->mfr_desc) {
            VLOG_WARN("truncating mfr_desc, must be less than %zu characters",
                      sizeof ods->mfr_desc);
        }
        free(p->mfr_desc);
        p->mfr_desc = xstrdup(mfr_desc);
    }
    if (hw_desc) {
        if (strlen(hw_desc) >= sizeof ods->hw_desc) {
            VLOG_WARN("truncating hw_desc, must be less than %zu characters",
                      sizeof ods->hw_desc);
        }
        free(p->hw_desc);
        p->hw_desc = xstrdup(hw_desc);
    }
    if (sw_desc) {
        if (strlen(sw_desc) >= sizeof ods->sw_desc) {
            VLOG_WARN("truncating sw_desc, must be less than %zu characters",
                      sizeof ods->sw_desc);
        }
        free(p->sw_desc);
        p->sw_desc = xstrdup(sw_desc);
    }
    if (serial_desc) {
        if (strlen(serial_desc) >= sizeof ods->serial_num) {
            VLOG_WARN("truncating serial_desc, must be less than %zu "
                      "characters",
                      sizeof ods->serial_num);
        }
        free(p->serial_desc);
        p->serial_desc = xstrdup(serial_desc);
    }
    if (dp_desc) {
        if (strlen(dp_desc) >= sizeof ods->dp_desc) {
            VLOG_WARN("truncating dp_desc, must be less than %zu characters",
                      sizeof ods->dp_desc);
        }
        free(p->dp_desc);
        p->dp_desc = xstrdup(dp_desc);
    }
}

static int
set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
            const struct svec *svec)
{
    struct pvconn **pvconns = *pvconnsp;
    size_t n_pvconns = *n_pvconnsp;
    int retval = 0;
    size_t i;

    for (i = 0; i < n_pvconns; i++) {
        pvconn_close(pvconns[i]);
    }
    free(pvconns);

    pvconns = xmalloc(svec->n * sizeof *pvconns);
    n_pvconns = 0;
    for (i = 0; i < svec->n; i++) {
        const char *name = svec->names[i];
        struct pvconn *pvconn;
        int error;

        error = pvconn_open(name, &pvconn);
        if (!error) {
            pvconns[n_pvconns++] = pvconn;
        } else {
            VLOG_ERR("failed to listen on %s: %s", name, strerror(error));
            if (!retval) {
                retval = error;
            }
        }
    }

    *pvconnsp = pvconns;
    *n_pvconnsp = n_pvconns;

    return retval;
}

int
ofproto_set_listeners(struct ofproto *ofproto, const struct svec *listeners)
{
    return set_pvconns(&ofproto->listeners, &ofproto->n_listeners, listeners);
}

int
ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops)
{
    return set_pvconns(&ofproto->snoops, &ofproto->n_snoops, snoops);
}

int
ofproto_set_netflow(struct ofproto *ofproto,
                    const struct netflow_options *nf_options)
{
    if (nf_options && nf_options->collectors.n) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, nf_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

void
ofproto_set_sflow(struct ofproto *ofproto,
                  const struct ofproto_sflow_options *oso)
{
    struct ofproto_sflow *os = ofproto->sflow;
    if (oso) {
        if (!os) {
            struct ofport *ofport;
            unsigned int odp_port;

            os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
            refresh_port_groups(ofproto);
            PORT_ARRAY_FOR_EACH (ofport, &ofproto->ports, odp_port) {
                ofproto_sflow_add_port(os, odp_port,
                                       netdev_get_name(ofport->netdev));
            }
        }
        ofproto_sflow_set_options(os, oso);
    } else {
        ofproto_sflow_destroy(os);
        ofproto->sflow = NULL;
    }
}

int
ofproto_set_stp(struct ofproto *ofproto OVS_UNUSED, bool enable_stp)
{
    /* XXX */
    if (enable_stp) {
        VLOG_WARN("STP is not yet implemented");
        return EINVAL;
    } else {
        return 0;
    }
}

uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
    return ofproto->datapath_id;
}

bool
ofproto_has_controller(const struct ofproto *ofproto)
{
    return !hmap_is_empty(&ofproto->controllers);
}

void
ofproto_get_listeners(const struct ofproto *ofproto, struct svec *listeners)
{
    size_t i;

    for (i = 0; i < ofproto->n_listeners; i++) {
        svec_add(listeners, pvconn_get_name(ofproto->listeners[i]));
    }
}

void
ofproto_get_snoops(const struct ofproto *ofproto, struct svec *snoops)
{
    size_t i;

    for (i = 0; i < ofproto->n_snoops; i++) {
        svec_add(snoops, pvconn_get_name(ofproto->snoops[i]));
    }
}

void
ofproto_destroy(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    struct ofport *ofport;
    unsigned int port_no;
    size_t i;

    if (!p) {
        return;
    }

    /* Destroy fail-open and in-band early, since they touch the classifier. */
    fail_open_destroy(p->fail_open);
    p->fail_open = NULL;

    in_band_destroy(p->in_band);
    p->in_band = NULL;
    free(p->extra_in_band_remotes);

    ofproto_flush_flows(p);
    classifier_destroy(&p->cls);

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_destroy(ofconn);
    }
    hmap_destroy(&p->controllers);

    dpif_close(p->dpif);
    netdev_monitor_destroy(p->netdev_monitor);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        ofport_free(ofport);
    }
    shash_destroy(&p->port_by_name);

    switch_status_destroy(p->switch_status);
    netflow_destroy(p->netflow);
    ofproto_sflow_destroy(p->sflow);

    for (i = 0; i < p->n_listeners; i++) {
        pvconn_close(p->listeners[i]);
    }
    free(p->listeners);

    for (i = 0; i < p->n_snoops; i++) {
        pvconn_close(p->snoops[i]);
    }
    free(p->snoops);

    mac_learning_destroy(p->ml);

    free(p->mfr_desc);
    free(p->hw_desc);
    free(p->sw_desc);
    free(p->serial_desc);
    free(p->dp_desc);

    port_array_destroy(&p->ports);

    free(p);
}

int
ofproto_run(struct ofproto *p)
{
    int error = ofproto_run1(p);
    if (!error) {
        error = ofproto_run2(p, false);
    }
    return error;
}

static void
process_port_change(struct ofproto *ofproto, int error, char *devname)
{
    if (error == ENOBUFS) {
        reinit_ports(ofproto);
    } else if (!error) {
        update_port(ofproto, devname);
        free(devname);
    }
}

/* Returns a "preference level" for snooping 'ofconn'.  A higher return value
 * means that 'ofconn' is more interesting for monitoring than a lower return
 * value. */
static int
snoop_preference(const struct ofconn *ofconn)
{
    switch (ofconn->role) {
    case NX_ROLE_MASTER:
        return 3;
    case NX_ROLE_OTHER:
        return 2;
    case NX_ROLE_SLAVE:
        return 1;
    default:
        /* Shouldn't happen. */
        return 0;
    }
}

/* One of ofproto's "snoop" pvconns has accepted a new connection on 'vconn'.
 * Connects this vconn to a controller. */
static void
add_snooper(struct ofproto *ofproto, struct vconn *vconn)
{
    struct ofconn *ofconn, *best;

    /* Pick a controller for monitoring. */
    best = NULL;
    LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
        if (ofconn->type == OFCONN_CONTROLLER
            && (!best || snoop_preference(ofconn) > snoop_preference(best))) {
            best = ofconn;
        }
    }

    if (best) {
        rconn_add_monitor(best->rconn, vconn);
    } else {
        VLOG_INFO_RL(&rl, "no controller connection to snoop");
        vconn_close(vconn);
    }
}

static int
ofproto_run1(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    char *devname;
    int error;
    int i;

    if (shash_is_empty(&p->port_by_name)) {
        init_ports(p);
    }

    for (i = 0; i < 50; i++) {
        struct ofpbuf *buf;
        int error;

        error = dpif_recv(p->dpif, &buf);
        if (error) {
            if (error == ENODEV) {
                /* Someone destroyed the datapath behind our back.  The caller
                 * better destroy us and give up, because we're just going to
                 * spin from here on out. */
                static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_ERR_RL(&rl2, "%s: datapath was destroyed externally",
                            dpif_name(p->dpif));
                return ENODEV;
            }
            break;
        }

        handle_odp_msg(p, buf);
    }

    while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }
    while ((error = netdev_monitor_poll(p->netdev_monitor,
                                        &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }

    if (p->in_band) {
        if (time_msec() >= p->next_in_band_update) {
            update_in_band_remotes(p);
        }
        in_band_run(p->in_band);
    }

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_run(ofconn, p);
    }

    /* Fail-open maintenance.  Do this after processing the ofconns since
     * fail-open checks the status of the controller rconn. */
    if (p->fail_open) {
        fail_open_run(p->fail_open);
    }

    for (i = 0; i < p->n_listeners; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->listeners[i], OFP_VERSION, &vconn);
        if (!retval) {
            struct rconn *rconn;
            char *name;

            rconn = rconn_create(60, 0);
            name = ofconn_make_name(p, vconn_get_name(vconn));
            rconn_connect_unreliably(rconn, vconn, name);
            free(name);

            ofconn_create(p, rconn, OFCONN_TRANSIENT);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    for (i = 0; i < p->n_snoops; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn);
        if (!retval) {
            add_snooper(p, vconn);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    if (time_msec() >= p->next_expiration) {
        COVERAGE_INC(ofproto_expiration);
        p->next_expiration = time_msec() + 1000;
        update_used(p);

        classifier_for_each(&p->cls, CLS_INC_ALL, expire_rule, p);

        /* Let the hook know that we're at a stable point: all outstanding data
         * in existing flows has been accounted to the account_cb.  Thus, the
         * hook can now reasonably do operations that depend on having accurate
         * flow volume accounting (currently, that's just bond rebalancing). */
        if (p->ofhooks->account_checkpoint_cb) {
            p->ofhooks->account_checkpoint_cb(p->aux);
        }
    }

    if (p->netflow) {
        netflow_run(p->netflow);
    }
    if (p->sflow) {
        ofproto_sflow_run(p->sflow);
    }

    return 0;
}

struct revalidate_cbdata {
    struct ofproto *ofproto;
    bool revalidate_all;        /* Revalidate all exact-match rules? */
    bool revalidate_subrules;   /* Revalidate all exact-match subrules? */
    struct tag_set revalidate_set; /* Set of tags to revalidate. */
};

int
ofproto_run2(struct ofproto *p, bool revalidate_all)
{
    if (p->need_revalidate || revalidate_all
        || !tag_set_is_empty(&p->revalidate_set)) {
        struct revalidate_cbdata cbdata;
        cbdata.ofproto = p;
        cbdata.revalidate_all = revalidate_all;
        cbdata.revalidate_subrules = p->need_revalidate;
        cbdata.revalidate_set = p->revalidate_set;
        tag_set_init(&p->revalidate_set);
        COVERAGE_INC(ofproto_revalidate);
        classifier_for_each(&p->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
        p->need_revalidate = false;
    }

    return 0;
}

void
ofproto_wait(struct ofproto *p)
{
    struct ofconn *ofconn;
    size_t i;

    dpif_recv_wait(p->dpif);
    dpif_port_poll_wait(p->dpif);
    netdev_monitor_poll_wait(p->netdev_monitor);
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        ofconn_wait(ofconn);
    }
    if (p->in_band) {
        poll_timer_wait_until(p->next_in_band_update);
        in_band_wait(p->in_band);
    }
    if (p->fail_open) {
        fail_open_wait(p->fail_open);
    }
    if (p->sflow) {
        ofproto_sflow_wait(p->sflow);
    }
    if (!tag_set_is_empty(&p->revalidate_set)) {
        poll_immediate_wake();
    }
    if (p->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else if (p->next_expiration != LLONG_MAX) {
        poll_timer_wait_until(p->next_expiration);
    }
    for (i = 0; i < p->n_listeners; i++) {
        pvconn_wait(p->listeners[i]);
    }
    for (i = 0; i < p->n_snoops; i++) {
        pvconn_wait(p->snoops[i]);
    }
}

void
ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
{
    tag_set_add(&ofproto->revalidate_set, tag);
}

struct tag_set *
ofproto_get_revalidate_set(struct ofproto *ofproto)
{
    return &ofproto->revalidate_set;
}

bool
ofproto_is_alive(const struct ofproto *p)
{
    return !hmap_is_empty(&p->controllers);
}

int
ofproto_send_packet(struct ofproto *p, const flow_t *flow,
                    const union ofp_action *actions, size_t n_actions,
                    const struct ofpbuf *packet)
{
    struct odp_actions odp_actions;
    int error;

    error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
                          NULL, NULL, NULL);
    if (error) {
        return error;
    }

    /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
     * error code? */
    dpif_execute(p->dpif, flow->in_port, odp_actions.actions,
                 odp_actions.n_actions, packet);
    return 0;
}

void
ofproto_add_flow(struct ofproto *p,
                 const flow_t *flow, uint32_t wildcards, unsigned int priority,
                 const union ofp_action *actions, size_t n_actions,
                 int idle_timeout)
{
    struct rule *rule;
    rule = rule_create(p, NULL, actions, n_actions,
                       idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
                       0, 0, false);
    cls_rule_from_flow(flow, wildcards, priority, &rule->cr);
    rule_insert(p, rule, NULL, 0);
}

void
ofproto_delete_flow(struct ofproto *ofproto, const flow_t *flow,
                    uint32_t wildcards, unsigned int priority)
{
    struct rule *rule;

    rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
                                                           flow, wildcards,
                                                           priority));
    if (rule) {
        rule_remove(ofproto, rule);
    }
}

static void
destroy_rule(struct cls_rule *rule_, void *ofproto_)
{
    struct rule *rule = rule_from_cls_rule(rule_);
    struct ofproto *ofproto = ofproto_;

    /* Mark the flow as not installed, even though it might really be
     * installed, so that rule_remove() doesn't bother trying to uninstall it.
     * There is no point in uninstalling it individually since we are about to
     * blow away all the flows with dpif_flow_flush(). */
    rule->installed = false;

    rule_remove(ofproto, rule);
}

void
ofproto_flush_flows(struct ofproto *ofproto)
{
    COVERAGE_INC(ofproto_flush);
    classifier_for_each(&ofproto->cls, CLS_INC_ALL, destroy_rule, ofproto);
    dpif_flow_flush(ofproto->dpif);
    if (ofproto->in_band) {
        in_band_flushed(ofproto->in_band);
    }
    if (ofproto->fail_open) {
        fail_open_flushed(ofproto->fail_open);
    }
}

static void
reinit_ports(struct ofproto *p)
{
    struct svec devnames;
    struct ofport *ofport;
    unsigned int port_no;
    struct odp_port *odp_ports;
    size_t n_odp_ports;
    size_t i;

    svec_init(&devnames);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        svec_add(&devnames, (char *) ofport->opp.name);
    }
    dpif_port_list(p->dpif, &odp_ports, &n_odp_ports);
    for (i = 0; i < n_odp_ports; i++) {
        svec_add(&devnames, odp_ports[i].devname);
    }
    free(odp_ports);

    svec_sort_unique(&devnames);
    for (i = 0; i < devnames.n; i++) {
        update_port(p, devnames.names[i]);
    }
    svec_destroy(&devnames);
}

static size_t
refresh_port_group(struct ofproto *p, unsigned int group)
{
    uint16_t *ports;
    size_t n_ports;
    struct ofport *port;
    unsigned int port_no;

    assert(group == DP_GROUP_ALL || group == DP_GROUP_FLOOD);

    ports = xmalloc(port_array_count(&p->ports) * sizeof *ports);
    n_ports = 0;
    PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
        if (group == DP_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
            ports[n_ports++] = port_no;
        }
    }
    dpif_port_group_set(p->dpif, group, ports, n_ports);
    free(ports);

    return n_ports;
}

static void
refresh_port_groups(struct ofproto *p)
{
    size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD);
    size_t n_all = refresh_port_group(p, DP_GROUP_ALL);
    if (p->sflow) {
        ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all);
    }
}

static struct ofport *
make_ofport(const struct odp_port *odp_port)
{
    struct netdev_options netdev_options;
    enum netdev_flags flags;
    struct ofport *ofport;
    struct netdev *netdev;
    bool carrier;
    int error;

    memset(&netdev_options, 0, sizeof netdev_options);
    netdev_options.name = odp_port->devname;
    netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;

    error = netdev_open(&netdev_options, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
                     "cannot be opened (%s)",
                     odp_port->devname, odp_port->port,
                     odp_port->devname, strerror(error));
        return NULL;
    }

    ofport = xmalloc(sizeof *ofport);
    ofport->netdev = netdev;
    ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port);
    netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
    memcpy(ofport->opp.name, odp_port->devname,
           MIN(sizeof ofport->opp.name, sizeof odp_port->devname));
    ofport->opp.name[sizeof ofport->opp.name - 1] = '\0';

    netdev_get_flags(netdev, &flags);
    ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;

    netdev_get_carrier(netdev, &carrier);
    ofport->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;

    netdev_get_features(netdev,
                        &ofport->opp.curr, &ofport->opp.advertised,
                        &ofport->opp.supported, &ofport->opp.peer);
    return ofport;
}

static bool
ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port)
{
    if (port_array_get(&p->ports, odp_port->port)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
                     odp_port->port);
        return true;
    } else if (shash_find(&p->port_by_name, odp_port->devname)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
                     odp_port->devname);
        return true;
    } else {
        return false;
    }
}

static int
ofport_equal(const struct ofport *a_, const struct ofport *b_)
{
    const struct ofp_phy_port *a = &a_->opp;
    const struct ofp_phy_port *b = &b_->opp;

    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
    return (a->port_no == b->port_no
            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
            && !strcmp((char *) a->name, (char *) b->name)
            && a->state == b->state
            && a->config == b->config
            && a->curr == b->curr
            && a->advertised == b->advertised
            && a->supported == b->supported
            && a->peer == b->peer);
}

static void
send_port_status(struct ofproto *p, const struct ofport *ofport,
                 uint8_t reason)
{
    /* XXX Should limit the number of queued port status change messages. */
    struct ofconn *ofconn;
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        struct ofp_port_status *ops;
        struct ofpbuf *b;

        if (!ofconn_receives_async_msgs(ofconn)) {
            continue;
        }

        ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
        ops->reason = reason;
        ops->desc = ofport->opp;
        hton_ofp_phy_port(&ops->desc);
        queue_tx(b, ofconn, NULL);
    }
    if (p->ofhooks->port_changed_cb) {
        p->ofhooks->port_changed_cb(reason, &ofport->opp, p->aux);
    }
}

static void
ofport_install(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);
    const char *netdev_name = (const char *) ofport->opp.name;

    netdev_monitor_add(p->netdev_monitor, ofport->netdev);
    port_array_set(&p->ports, odp_port, ofport);
    shash_add(&p->port_by_name, netdev_name, ofport);
    if (p->sflow) {
        ofproto_sflow_add_port(p->sflow, odp_port, netdev_name);
    }
}

static void
ofport_remove(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);

    netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
    port_array_delete(&p->ports, odp_port);
    shash_delete(&p->port_by_name,
                 shash_find(&p->port_by_name, (char *) ofport->opp.name));
    if (p->sflow) {
        ofproto_sflow_del_port(p->sflow, odp_port);
    }
}

static void
ofport_free(struct ofport *ofport)
{
    if (ofport) {
        netdev_close(ofport->netdev);
        free(ofport);
    }
}

static void
update_port(struct ofproto *p, const char *devname)
{
    struct odp_port odp_port;
    struct ofport *old_ofport;
    struct ofport *new_ofport;
    int error;

    COVERAGE_INC(ofproto_update_port);

    /* Query the datapath for port information. */
    error = dpif_port_query_by_name(p->dpif, devname, &odp_port);

    /* Find the old ofport. */
    old_ofport = shash_find_data(&p->port_by_name, devname);
    if (!error) {
        if (!old_ofport) {
            /* There's no port named 'devname' but there might be a port with
             * the same port number.  This could happen if a port is deleted
             * and then a new one added in its place very quickly, or if a port
             * is renamed.  In the former case we want to send an OFPPR_DELETE
             * and an OFPPR_ADD, and in the latter case we want to send a
             * single OFPPR_MODIFY.  We can distinguish the cases by comparing
             * the old port's ifindex against the new port, or perhaps less
             * reliably but more portably by comparing the old port's MAC
             * against the new port's MAC.  However, this code isn't that smart
             * and always sends an OFPPR_MODIFY (XXX). */
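
            /* A possible refinement, sketched here as a comment only: once
             * new_ofport has been created below, the two cases could be told
             * apart by comparing MAC addresses, e.g.
             *
             *     if (memcmp(old_ofport->opp.hw_addr,
             *                new_ofport->opp.hw_addr, OFP_ETH_ALEN)) {
             *         ... treat as OFPPR_DELETE followed by OFPPR_ADD ...
             *     } else {
             *         ... treat as a single OFPPR_MODIFY ...
             *     }
             */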
            old_ofport = port_array_get(&p->ports, odp_port.port);
        }
    } else if (error != ENOENT && error != ENODEV) {
        VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error "
                     "%s", strerror(error));
        return;
    }

    /* Create a new ofport. */
    new_ofport = !error ? make_ofport(&odp_port) : NULL;

    /* Eliminate a few pathological cases. */
    if (!old_ofport && !new_ofport) {
        return;
    } else if (old_ofport && new_ofport) {
        /* Most of the 'config' bits are OpenFlow soft state, but
         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
         * OpenFlow bits from old_ofport.  (make_ofport() only sets
         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
        new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;

        if (ofport_equal(old_ofport, new_ofport)) {
            /* False alarm--no change. */
            ofport_free(new_ofport);
            return;
        }
    }

    /* Now deal with the normal cases. */
    if (old_ofport) {
        ofport_remove(p, old_ofport);
    }
    if (new_ofport) {
        ofport_install(p, new_ofport);
    }
    send_port_status(p, new_ofport ? new_ofport : old_ofport,
                     (!old_ofport ? OFPPR_ADD
                      : !new_ofport ? OFPPR_DELETE
                      : OFPPR_MODIFY));
    ofport_free(old_ofport);

    /* Update port groups. */
    refresh_port_groups(p);
}

static int
init_ports(struct ofproto *p)
{
    struct odp_port *ports;
    size_t n_ports;
    size_t i;
    int error;

    error = dpif_port_list(p->dpif, &ports, &n_ports);
    if (error) {
        return error;
    }

    for (i = 0; i < n_ports; i++) {
        const struct odp_port *odp_port = &ports[i];
        if (!ofport_conflicts(p, odp_port)) {
            struct ofport *ofport = make_ofport(odp_port);
            if (ofport) {
                ofport_install(p, ofport);
            }
        }
    }
    free(ports);
    refresh_port_groups(p);
    return 0;
}

static struct ofconn *
ofconn_create(struct ofproto *p, struct rconn *rconn, enum ofconn_type type)
{
    struct ofconn *ofconn = xzalloc(sizeof *ofconn);
    ofconn->ofproto = p;
    list_push_back(&p->all_conns, &ofconn->node);
    ofconn->rconn = rconn;
    ofconn->type = type;
    ofconn->role = NX_ROLE_OTHER;
    ofconn->packet_in_counter = rconn_packet_counter_create();
    ofconn->pktbuf = NULL;
    ofconn->miss_send_len = 0;
    ofconn->reply_counter = rconn_packet_counter_create();
    return ofconn;
}

static void
ofconn_destroy(struct ofconn *ofconn)
{
    if (ofconn->type == OFCONN_CONTROLLER) {
        hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node);
    }
    discovery_destroy(ofconn->discovery);

    list_remove(&ofconn->node);
    switch_status_unregister(ofconn->ss);
    rconn_destroy(ofconn->rconn);
    rconn_packet_counter_destroy(ofconn->packet_in_counter);
    rconn_packet_counter_destroy(ofconn->reply_counter);
    pktbuf_destroy(ofconn->pktbuf);
    free(ofconn);
}

static void
ofconn_run(struct ofconn *ofconn, struct ofproto *p)
{
    int iteration;
    int i;

    if (ofconn->discovery) {
        char *controller_name;
        if (rconn_is_connectivity_questionable(ofconn->rconn)) {
            discovery_question_connectivity(ofconn->discovery);
        }
        if (discovery_run(ofconn->discovery, &controller_name)) {
            if (controller_name) {
                char *ofconn_name = ofconn_make_name(p, controller_name);
                rconn_connect(ofconn->rconn, controller_name, ofconn_name);
                free(ofconn_name);
            } else {
                rconn_disconnect(ofconn->rconn);
            }
        }
    }

    for (i = 0; i < N_SCHEDULERS; i++) {
        pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn);
    }

    rconn_run(ofconn->rconn);

    if (rconn_packet_counter_read(ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        /* Limit the number of iterations to prevent other tasks from
         * starving. */
        for (iteration = 0; iteration < 50; iteration++) {
            struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
            if (!of_msg) {
                break;
            }
            if (p->fail_open) {
                fail_open_maybe_recover(p->fail_open);
            }
            handle_openflow(ofconn, p, of_msg);
            ofpbuf_delete(of_msg);
        }
    }

    if (!ofconn->discovery && !rconn_is_alive(ofconn->rconn)) {
        ofconn_destroy(ofconn);
    }
}

static void
ofconn_wait(struct ofconn *ofconn)
{
    int i;

    if (ofconn->discovery) {
        discovery_wait(ofconn->discovery);
    }
    for (i = 0; i < N_SCHEDULERS; i++) {
        pinsched_wait(ofconn->schedulers[i]);
    }
    rconn_run_wait(ofconn->rconn);
    if (rconn_packet_counter_read(ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        rconn_recv_wait(ofconn->rconn);
    } else {
        COVERAGE_INC(ofproto_ofconn_stuck);
    }
}

/* Returns true if 'ofconn' should receive asynchronous messages. */
static bool
ofconn_receives_async_msgs(const struct ofconn *ofconn)
{
    if (ofconn->type == OFCONN_CONTROLLER) {
        /* Ordinary controllers always get asynchronous messages unless they
         * have configured themselves as "slaves". */
        return ofconn->role != NX_ROLE_SLAVE;
    } else {
        /* Transient connections don't get asynchronous messages unless they
         * have explicitly asked for them by setting a nonzero miss send
         * length. */
        return ofconn->miss_send_len > 0;
    }
}

/* Returns a human-readable name for an OpenFlow connection between 'ofproto'
 * and 'target', suitable for use in log messages for identifying the
 * connection.
 *
 * The name is dynamically allocated.  The caller should free it (with free())
 * when it is no longer needed. */
static char *
ofconn_make_name(const struct ofproto *ofproto, const char *target)
{
    return xasprintf("%s<->%s", dpif_base_name(ofproto->dpif), target);
}
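
/* Example usage (illustrative; mirrors add_controller() above):
 *
 *     char *name = ofconn_make_name(ofproto, c->target);
 *     rconn_connect(ofconn->rconn, c->target, name);
 *     free(name);
 */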

/* Caller is responsible for initializing the 'cr' member of the returned
 * rule. */
static struct rule *
rule_create(struct ofproto *ofproto, struct rule *super,
            const union ofp_action *actions, size_t n_actions,
            uint16_t idle_timeout, uint16_t hard_timeout,
            uint64_t flow_cookie, bool send_flow_removed)
{
    struct rule *rule = xzalloc(sizeof *rule);
    rule->idle_timeout = idle_timeout;
    rule->hard_timeout = hard_timeout;
    rule->flow_cookie = flow_cookie;
    rule->used = rule->created = time_msec();
    rule->send_flow_removed = send_flow_removed;
    rule->super = super;
    if (super) {
        list_push_back(&super->list, &rule->list);
    } else {
        list_init(&rule->list);
    }
    rule->n_actions = n_actions;
    rule->actions = xmemdup(actions, n_actions * sizeof *actions);
    netflow_flow_clear(&rule->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);

    return rule;
}

static struct rule *
rule_from_cls_rule(const struct cls_rule *cls_rule)
{
    return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
}

static void
rule_free(struct rule *rule)
{
    free(rule->actions);
    free(rule->odp_actions);
    free(rule);
}

/* Destroys 'rule'.  If 'rule' is a subrule, also removes it from its
 * super-rule's list of subrules.  If 'rule' is a super-rule, also iterates
 * through all of its subrules and revalidates them, destroying any that no
 * longer have a super-rule (which is probably all of them).
 *
 * Before calling this function, the caller must have removed 'rule' from the
 * classifier.  If 'rule' is an exact-match rule, the caller is also
 * responsible for ensuring that it has been uninstalled from the datapath. */
static void
rule_destroy(struct ofproto *ofproto, struct rule *rule)
{
    if (!rule->super) {
        struct rule *subrule, *next;
        LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
            revalidate_rule(ofproto, subrule);
        }
    } else {
        list_remove(&rule->list);
    }
    rule_free(rule);
}

static bool
rule_has_out_port(const struct rule *rule, uint16_t out_port)
{
    const union ofp_action *oa;
    struct actions_iterator i;

    if (out_port == htons(OFPP_NONE)) {
        return true;
    }
    for (oa = actions_first(&i, rule->actions, rule->n_actions); oa;
         oa = actions_next(&i)) {
        if (action_outputs_to_port(oa, out_port)) {
            return true;
        }
    }
    return false;
}

/* Executes the actions indicated by 'rule' on 'packet', which is in flow
 * 'flow' and is considered to have arrived on ODP port 'in_port'.
 *
 * The flow that 'packet' actually contains does not need to actually match
 * 'rule'; the actions in 'rule' will be applied to it either way.  Likewise,
 * the packet and byte counters for 'rule' will be credited for the packet sent
 * out whether or not the packet actually matches 'rule'.
 *
 * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
 * the caller must already have accurately composed ODP actions for it given
 * 'packet' using rule_make_actions().  If 'rule' is a wildcard rule, or if
 * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
 * function will compose a set of ODP actions based on 'rule''s OpenFlow
 * actions and apply them to 'packet'. */
static void
rule_execute(struct ofproto *ofproto, struct rule *rule,
             struct ofpbuf *packet, const flow_t *flow)
{
    const union odp_action *actions;
    size_t n_actions;
    struct odp_actions a;

    /* Grab or compose the ODP actions.
     *
     * The special case for an exact-match 'rule' where 'flow' is not the
     * rule's flow is important to avoid, e.g., sending a packet out its input
     * port simply because the ODP actions were composed for the wrong
     * situation. */
    if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
        struct rule *super = rule->super ? rule->super : rule;
        if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
                          packet, &a, NULL, 0, NULL)) {
            return;
        }
        actions = a.actions;
        n_actions = a.n_actions;
    } else {
        actions = rule->odp_actions;
        n_actions = rule->n_odp_actions;
    }

    /* Execute the ODP actions. */
    if (!dpif_execute(ofproto->dpif, flow->in_port,
                      actions, n_actions, packet)) {
        struct odp_flow_stats stats;
        flow_extract_stats(flow, packet, &stats);
        update_stats(ofproto, rule, &stats);
        rule->used = time_msec();
        netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
    }
}

static void
rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet,
            uint16_t in_port)
{
    struct rule *displaced_rule;

    /* Insert the rule in the classifier. */
    displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
    if (!rule->cr.wc.wildcards) {
        rule_make_actions(p, rule, packet);
    }

    /* Send the packet and credit it to the rule. */
    if (packet) {
        flow_t flow;
        flow_extract(packet, 0, in_port, &flow);
        rule_execute(p, rule, packet, &flow);
    }

    /* Install the rule in the datapath only after sending the packet, to
     * avoid packet reordering. */
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_add_wc_flow);
        p->need_revalidate = true;
    } else {
        rule_install(p, rule, displaced_rule);
    }

    /* Free the rule that was displaced, if any. */
    if (displaced_rule) {
        rule_destroy(p, displaced_rule);
    }
}

static struct rule *
rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
                    const flow_t *flow)
{
    struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
                                       rule->idle_timeout, rule->hard_timeout,
                                       0, false);
    COVERAGE_INC(ofproto_subrule_create);
    cls_rule_from_flow(flow, 0, (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
                                 : rule->cr.priority), &subrule->cr);
    classifier_insert_exact(&ofproto->cls, &subrule->cr);

    return subrule;
}

static void
rule_remove(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_del_wc_flow);
        ofproto->need_revalidate = true;
    } else {
        rule_uninstall(ofproto, rule);
    }
    classifier_remove(&ofproto->cls, &rule->cr);
    rule_destroy(ofproto, rule);
}

/* Returns true if the actions changed, false otherwise. */
static bool
rule_make_actions(struct ofproto *p, struct rule *rule,
                  const struct ofpbuf *packet)
{
    const struct rule *super;
    struct odp_actions a;
    size_t actions_len;

    assert(!rule->cr.wc.wildcards);

    super = rule->super ? rule->super : rule;
    rule->tags = 0;
    xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
                  packet, &a, &rule->tags, &rule->may_install,
                  &rule->nf_flow.output_iface);

    actions_len = a.n_actions * sizeof *a.actions;
    if (rule->n_odp_actions != a.n_actions
        || memcmp(rule->odp_actions, a.actions, actions_len)) {
        COVERAGE_INC(ofproto_odp_unchanged);
        free(rule->odp_actions);
        rule->n_odp_actions = a.n_actions;
        rule->odp_actions = xmemdup(a.actions, actions_len);
        return true;
    } else {
        return false;
    }
}

static int
do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags,
            struct odp_flow_put *put)
{
    memset(&put->flow.stats, 0, sizeof put->flow.stats);
    put->flow.key = rule->cr.flow;
    put->flow.actions = rule->odp_actions;
    put->flow.n_actions = rule->n_odp_actions;
    put->flow.flags = 0;
    put->flags = flags;
    return dpif_flow_put(ofproto->dpif, put);
}

static void
rule_install(struct ofproto *p, struct rule *rule, struct rule *displaced_rule)
{
    assert(!rule->cr.wc.wildcards);

    if (rule->may_install) {
        struct odp_flow_put put;
        if (!do_put_flow(p, rule,
                         ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS,
                         &put)) {
            rule->installed = true;
            if (displaced_rule) {
                update_stats(p, displaced_rule, &put.flow.stats);
                rule_post_uninstall(p, displaced_rule);
            }
        }
    } else if (displaced_rule) {
        rule_uninstall(p, displaced_rule);
    }
}

static void
rule_reinstall(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->installed) {
        struct odp_flow_put put;
        COVERAGE_INC(ofproto_dp_missed);
        do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY, &put);
    } else {
        rule_install(ofproto, rule, NULL);
    }
}

static void
rule_update_actions(struct ofproto *ofproto, struct rule *rule)
{
    bool actions_changed;
    uint16_t new_out_iface, old_out_iface;

    old_out_iface = rule->nf_flow.output_iface;
    actions_changed = rule_make_actions(ofproto, rule, NULL);

    if (rule->may_install) {
        if (rule->installed) {
            if (actions_changed) {
                struct odp_flow_put put;
                do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY
                            | ODPPF_ZERO_STATS, &put);
                update_stats(ofproto, rule, &put.flow.stats);

                /* Temporarily set the old output iface so that NetFlow
                 * messages have the correct output interface for the old
                 * stats. */
                new_out_iface = rule->nf_flow.output_iface;
                rule->nf_flow.output_iface = old_out_iface;
                rule_post_uninstall(ofproto, rule);
                rule->nf_flow.output_iface = new_out_iface;
            }
        } else {
            rule_install(ofproto, rule, NULL);
        }
    } else {
        rule_uninstall(ofproto, rule);
    }
}

static void
rule_account(struct ofproto *ofproto, struct rule *rule, uint64_t extra_bytes)
{
    uint64_t total_bytes = rule->byte_count + extra_bytes;

    if (ofproto->ofhooks->account_flow_cb
        && total_bytes > rule->accounted_bytes)
    {
        ofproto->ofhooks->account_flow_cb(
            &rule->cr.flow, rule->odp_actions, rule->n_odp_actions,
            total_bytes - rule->accounted_bytes, ofproto->aux);
        rule->accounted_bytes = total_bytes;
    }
}

static void
rule_uninstall(struct ofproto *p, struct rule *rule)
{
    assert(!rule->cr.wc.wildcards);
    if (rule->installed) {
        struct odp_flow odp_flow;

        odp_flow.key = rule->cr.flow;
        odp_flow.actions = NULL;
        odp_flow.n_actions = 0;
        odp_flow.flags = 0;
        if (!dpif_flow_del(p->dpif, &odp_flow)) {
            update_stats(p, rule, &odp_flow.stats);
        }
        rule->installed = false;

        rule_post_uninstall(p, rule);
    }
}

static bool
is_controller_rule(struct rule *rule)
{
    /* If the only action is send to the controller then don't report
     * NetFlow expiration messages since it is just part of the control
     * logic for the network and not real traffic. */
    return (rule
            && rule->super
            && rule->super->n_actions == 1
            && action_outputs_to_port(&rule->super->actions[0],
                                      htons(OFPP_CONTROLLER)));
}

static void
rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
{
    struct rule *super = rule->super;

    rule_account(ofproto, rule, 0);

    if (ofproto->netflow && !is_controller_rule(rule)) {
        struct ofexpired expired;
        expired.flow = rule->cr.flow;
        expired.packet_count = rule->packet_count;
        expired.byte_count = rule->byte_count;
        expired.used = rule->used;
        netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
    }
    if (super) {
        super->packet_count += rule->packet_count;
        super->byte_count += rule->byte_count;

        /* Reset counters to prevent double counting if the rule ever gets
         * reinstalled. */
        rule->packet_count = 0;
        rule->byte_count = 0;
        rule->accounted_bytes = 0;

        netflow_flow_clear(&rule->nf_flow);
    }
}

static void
queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
         struct rconn_packet_counter *counter)
{
    update_openflow_length(msg);
    if (rconn_send(ofconn->rconn, msg, counter)) {
        ofpbuf_delete(msg);
    }
}

static void
send_error(const struct ofconn *ofconn, const struct ofp_header *oh,
           int error, const void *data, size_t len)
{
    struct ofpbuf *buf;
    struct ofp_error_msg *oem;

    if (!(error >> 16)) {
        VLOG_WARN_RL(&rl, "not sending bad error code %d to controller",
                     error);
        return;
    }

    COVERAGE_INC(ofproto_error);
    oem = make_openflow_xid(len + sizeof *oem, OFPT_ERROR,
                            oh ? oh->xid : 0, &buf);
    oem->type = htons((unsigned int) error >> 16);
    oem->code = htons(error & 0xffff);
    memcpy(oem->data, data, len);
    queue_tx(buf, ofconn, ofconn->reply_counter);
}

static void
send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
              int error)
{
    size_t oh_length = ntohs(oh->length);
    send_error(ofconn, oh, error, oh, MIN(oh_length, 64));
}
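
/* Added note: send_error_oh() echoes at most the first 64 bytes of the
 * offending request (MIN(oh_length, 64)), which matches OpenFlow 1.0's rule
 * that error replies carry at least 64 bytes of the failed request, or the
 * whole request if it is shorter. */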

static void
hton_ofp_phy_port(struct ofp_phy_port *opp)
{
    opp->port_no = htons(opp->port_no);
    opp->config = htonl(opp->config);
    opp->state = htonl(opp->state);
    opp->curr = htonl(opp->curr);
    opp->advertised = htonl(opp->advertised);
    opp->supported = htonl(opp->supported);
    opp->peer = htonl(opp->peer);
}

static int
handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
{
    struct ofp_header *rq = oh;
    queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
    return 0;
}

static int
handle_features_request(struct ofproto *p, struct ofconn *ofconn,
                        struct ofp_header *oh)
{
    struct ofp_switch_features *osf;
    struct ofpbuf *buf;
    unsigned int port_no;
    struct ofport *port;

    osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
    osf->datapath_id = htonll(p->datapath_id);
    osf->n_buffers = htonl(pktbuf_capacity());
    osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
                              OFPC_PORT_STATS | OFPC_ARP_MATCH_IP);
    osf->actions = htonl((1u << OFPAT_OUTPUT) |
                         (1u << OFPAT_SET_VLAN_VID) |
                         (1u << OFPAT_SET_VLAN_PCP) |
                         (1u << OFPAT_STRIP_VLAN) |
                         (1u << OFPAT_SET_DL_SRC) |
                         (1u << OFPAT_SET_DL_DST) |
                         (1u << OFPAT_SET_NW_SRC) |
                         (1u << OFPAT_SET_NW_DST) |
                         (1u << OFPAT_SET_NW_TOS) |
                         (1u << OFPAT_SET_TP_SRC) |
                         (1u << OFPAT_SET_TP_DST) |
                         (1u << OFPAT_ENQUEUE));

    PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
        hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
    }

    queue_tx(buf, ofconn, ofconn->reply_counter);
    return 0;
}

static int
handle_get_config_request(struct ofproto *p, struct ofconn *ofconn,
                          struct ofp_header *oh)
{
    struct ofpbuf *buf;
    struct ofp_switch_config *osc;
    uint16_t flags;
    bool drop_frags;

    /* Figure out flags. */
    dpif_get_drop_frags(p->dpif, &drop_frags);
    flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;

    /* Send reply. */
    osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
    osc->flags = htons(flags);
    osc->miss_send_len = htons(ofconn->miss_send_len);
    queue_tx(buf, ofconn, ofconn->reply_counter);

    return 0;
}

static int
handle_set_config(struct ofproto *p, struct ofconn *ofconn,
                  struct ofp_switch_config *osc)
{
    uint16_t flags;
    int error;

    error = check_ofp_message(&osc->header, OFPT_SET_CONFIG, sizeof *osc);
    if (error) {
        return error;
    }
    flags = ntohs(osc->flags);

    if (ofconn->type == OFCONN_CONTROLLER && ofconn->role != NX_ROLE_SLAVE) {
        switch (flags & OFPC_FRAG_MASK) {
        case OFPC_FRAG_NORMAL:
            dpif_set_drop_frags(p->dpif, false);
            break;
        case OFPC_FRAG_DROP:
            dpif_set_drop_frags(p->dpif, true);
            break;
        default:
            VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
                         osc->flags);
            break;
        }
    }

    ofconn->miss_send_len = ntohs(osc->miss_send_len);

    return 0;
}

static void
add_output_group_action(struct odp_actions *actions, uint16_t group,
                        uint16_t *nf_output_iface)
{
    odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group;

    if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) {
        *nf_output_iface = NF_OUT_FLOOD;
    }
}
2283 add_controller_action(struct odp_actions *actions,
2284 const struct ofp_action_output *oao)
2286 union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER);
2287 a->controller.arg = ntohs(oao->max_len);
2290 struct action_xlate_ctx {
2292 flow_t flow; /* Flow to which these actions correspond. */
2293 int recurse; /* Recursion level, via xlate_table_action. */
2294 struct ofproto *ofproto;
2295 const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
2296 * null pointer if we are revalidating
2297 * without a packet to refer to. */
2300 struct odp_actions *out; /* Datapath actions. */
2301 tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
2302 bool may_set_up_flow; /* True ordinarily; false if the actions must
2303 * be reassessed for every packet. */
2304 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
2307 static void do_xlate_actions(const union ofp_action *in, size_t n_in,
2308 struct action_xlate_ctx *ctx);
2311 add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
2313 const struct ofport *ofport = port_array_get(&ctx->ofproto->ports, port);
2316 if (ofport->opp.config & OFPPC_NO_FWD) {
2317 /* Forwarding disabled on port. */
2322 * We don't have an ofport record for this port, but it doesn't hurt to
2323 * allow forwarding to it anyhow. Maybe such a port will appear later
2324 * and we're pre-populating the flow table.
2328 odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
2329 ctx->nf_output_iface = port;
2332 static struct rule *
2333 lookup_valid_rule(struct ofproto *ofproto, const flow_t *flow)
2336 rule = rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow));
2338 /* The rule we found might not be valid, since we could be in need of
2339 * revalidation. If it is not valid, don't return it. */
2342 && ofproto->need_revalidate
2343 && !revalidate_rule(ofproto, rule)) {
2344 COVERAGE_INC(ofproto_invalidated);
2352 xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2354 if (!ctx->recurse) {
2355 uint16_t old_in_port;
2358 /* Look up a flow with 'in_port' as the input port. Then restore the
2359 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
2360 * have surprising behavior). */
2361 old_in_port = ctx->flow.in_port;
2362 ctx->flow.in_port = in_port;
2363 rule = lookup_valid_rule(ctx->ofproto, &ctx->flow);
2364 ctx->flow.in_port = old_in_port;
2372 do_xlate_actions(rule->actions, rule->n_actions, ctx);
2379 xlate_output_action(struct action_xlate_ctx *ctx,
2380 const struct ofp_action_output *oao)
2383 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2385 ctx->nf_output_iface = NF_OUT_DROP;
2387 switch (ntohs(oao->port)) {
2389 add_output_action(ctx, ctx->flow.in_port);
2392 xlate_table_action(ctx, ctx->flow.in_port);
2395 if (!ctx->ofproto->ofhooks->normal_cb(&ctx->flow, ctx->packet,
2396 ctx->out, ctx->tags,
2397 &ctx->nf_output_iface,
2398 ctx->ofproto->aux)) {
2399 COVERAGE_INC(ofproto_uninstallable);
2400 ctx->may_set_up_flow = false;
2404 add_output_group_action(ctx->out, DP_GROUP_FLOOD,
2405 &ctx->nf_output_iface);
2408 add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface);
2410 case OFPP_CONTROLLER:
2411 add_controller_action(ctx->out, oao);
2414 add_output_action(ctx, ODPP_LOCAL);
2417 odp_port = ofp_port_to_odp_port(ntohs(oao->port));
2418 if (odp_port != ctx->flow.in_port) {
2419 add_output_action(ctx, odp_port);
2424 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2425 ctx->nf_output_iface = NF_OUT_FLOOD;
2426 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2427 ctx->nf_output_iface = prev_nf_output_iface;
2428 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2429 ctx->nf_output_iface != NF_OUT_FLOOD) {
2430 ctx->nf_output_iface = NF_OUT_MULTI;
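/* The chain of tests above implements a small lattice for the NetFlow
 * output interface: NF_OUT_DROP (nothing output yet), then a single port,
 * then NF_OUT_MULTI, with NF_OUT_FLOOD absorbing everything.  A hedged
 * standalone model of the same merge ('nf_iface_merge' is hypothetical;
 * 'cur' is the value this action produced, 'prev' the value before it): */
#if 0 /* example only */
static uint16_t
nf_iface_merge(uint16_t prev, uint16_t cur)
{
    if (prev == NF_OUT_FLOOD) {
        return NF_OUT_FLOOD;            /* flood is sticky */
    } else if (cur == NF_OUT_DROP) {
        return prev;                    /* this action output nothing */
    } else if (prev != NF_OUT_DROP && cur != NF_OUT_FLOOD) {
        return NF_OUT_MULTI;            /* more than one output */
    } else {
        return cur;                     /* first (or flooding) output */
    }
}
#endif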
2434 /* If the final ODP action in 'ctx' is "pop priority", drop it, as an
2435 * optimization, because we're going to add another action that sets the
2436 * priority immediately after, or because there are no actions following the pop. */
2439 remove_pop_action(struct action_xlate_ctx *ctx)
2441 size_t n = ctx->out->n_actions;
2442 if (n > 0 && ctx->out->actions[n - 1].type == ODPAT_POP_PRIORITY) {
2443 ctx->out->n_actions--;
2448 xlate_enqueue_action(struct action_xlate_ctx *ctx,
2449 const struct ofp_action_enqueue *oae)
2451 uint16_t ofp_port, odp_port;
2453 /* Figure out ODP output port. */
2454 ofp_port = ntohs(oae->port);
2455 if (ofp_port != OFPP_IN_PORT) {
2456 odp_port = ofp_port_to_odp_port(ofp_port);
2458 odp_port = ctx->flow.in_port;
2461 /* Add ODP actions. */
2462 remove_pop_action(ctx);
2463 odp_actions_add(ctx->out, ODPAT_SET_PRIORITY)->priority.priority
2464 = TC_H_MAKE(1, ntohl(oae->queue_id)); /* XXX */
2465 add_output_action(ctx, odp_port);
2466 odp_actions_add(ctx->out, ODPAT_POP_PRIORITY);
2468 /* Update NetFlow output port. */
2469 if (ctx->nf_output_iface == NF_OUT_DROP) {
2470 ctx->nf_output_iface = odp_port;
2471 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
2472 ctx->nf_output_iface = NF_OUT_MULTI;
2477 xlate_nicira_action(struct action_xlate_ctx *ctx,
2478 const struct nx_action_header *nah)
2480 const struct nx_action_resubmit *nar;
2481 const struct nx_action_set_tunnel *nast;
2482 union odp_action *oa;
2483 int subtype = ntohs(nah->subtype);
2485 assert(nah->vendor == htonl(NX_VENDOR_ID));
2487 case NXAST_RESUBMIT:
2488 nar = (const struct nx_action_resubmit *) nah;
2489 xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
2492 case NXAST_SET_TUNNEL:
2493 nast = (const struct nx_action_set_tunnel *) nah;
2494 oa = odp_actions_add(ctx->out, ODPAT_SET_TUNNEL);
2495 ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;
2498 /* If you add a new action here that modifies flow data, don't forget to
2499 * update the flow key in ctx->flow at the same time. */
2502 VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
2508 do_xlate_actions(const union ofp_action *in, size_t n_in,
2509 struct action_xlate_ctx *ctx)
2511 struct actions_iterator iter;
2512 const union ofp_action *ia;
2513 const struct ofport *port;
2515 port = port_array_get(&ctx->ofproto->ports, ctx->flow.in_port);
2516 if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
2517 port->opp.config & (eth_addr_equals(ctx->flow.dl_dst, stp_eth_addr)
2518 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
2519 /* Drop this flow. */
2523 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
2524 uint16_t type = ntohs(ia->type);
2525 union odp_action *oa;
2529 xlate_output_action(ctx, &ia->output);
2532 case OFPAT_SET_VLAN_VID:
2533 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_VID);
2534 ctx->flow.dl_vlan = oa->vlan_vid.vlan_vid = ia->vlan_vid.vlan_vid;
2537 case OFPAT_SET_VLAN_PCP:
2538 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_PCP);
2539 ctx->flow.dl_vlan_pcp = oa->vlan_pcp.vlan_pcp = ia->vlan_pcp.vlan_pcp;
2542 case OFPAT_STRIP_VLAN:
2543 odp_actions_add(ctx->out, ODPAT_STRIP_VLAN);
2544 ctx->flow.dl_vlan = htons(OFP_VLAN_NONE);
2545 ctx->flow.dl_vlan_pcp = 0;
2548 case OFPAT_SET_DL_SRC:
2549 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_SRC);
2550 memcpy(oa->dl_addr.dl_addr,
2551 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2552 memcpy(ctx->flow.dl_src,
2553 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2556 case OFPAT_SET_DL_DST:
2557 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_DST);
2558 memcpy(oa->dl_addr.dl_addr,
2559 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2560 memcpy(ctx->flow.dl_dst,
2561 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2564 case OFPAT_SET_NW_SRC:
2565 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_SRC);
2566 ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2569 case OFPAT_SET_NW_DST:
2570 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
2571 ctx->flow.nw_dst = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2574 case OFPAT_SET_NW_TOS:
2575 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_TOS);
2576 ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
2579 case OFPAT_SET_TP_SRC:
2580 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC);
2581 ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port;
2584 case OFPAT_SET_TP_DST:
2585 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST);
2586 ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port;
2590 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
2594 xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
2598 VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
2605 xlate_actions(const union ofp_action *in, size_t n_in,
2606 const flow_t *flow, struct ofproto *ofproto,
2607 const struct ofpbuf *packet,
2608 struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
2609 uint16_t *nf_output_iface)
2611 tag_type no_tags = 0;
2612 struct action_xlate_ctx ctx;
2613 COVERAGE_INC(ofproto_ofp2odp);
2614 odp_actions_init(out);
2617 ctx.ofproto = ofproto;
2618 ctx.packet = packet;
2620 ctx.tags = tags ? tags : &no_tags;
2621 ctx.may_set_up_flow = true;
2622 ctx.nf_output_iface = NF_OUT_DROP;
2623 do_xlate_actions(in, n_in, &ctx);
2624 remove_pop_action(&ctx);
2626 /* Check with in-band control to see if we're allowed to set up this flow. */
2628 if (!in_band_rule_check(ofproto->in_band, flow, out)) {
2629 ctx.may_set_up_flow = false;
2632 if (may_set_up_flow) {
2633 *may_set_up_flow = ctx.may_set_up_flow;
2635 if (nf_output_iface) {
2636 *nf_output_iface = ctx.nf_output_iface;
2638 if (odp_actions_overflow(out)) {
2639 odp_actions_init(out);
2640 return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
2645 /* Checks whether 'ofconn' is a slave controller. If so, returns an OpenFlow
2646 * error message code (composed with ofp_mkerr()) for the caller to propagate
2647 * upward. Otherwise, returns 0.
2649 * 'oh' is used to make log messages more informative. */
2651 reject_slave_controller(struct ofconn *ofconn, const struct ofp_header *oh)
2653 if (ofconn->type == OFCONN_CONTROLLER && ofconn->role == NX_ROLE_SLAVE) {
2654 static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5);
2657 type_name = ofp_message_type_to_string(oh->type);
2658 VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller",
2662 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
2669 handle_packet_out(struct ofproto *p, struct ofconn *ofconn,
2670 struct ofp_header *oh)
2672 struct ofp_packet_out *opo;
2673 struct ofpbuf payload, *buffer;
2674 struct odp_actions actions;
2680 error = reject_slave_controller(ofconn, oh);
2685 error = check_ofp_packet_out(oh, &payload, &n_actions, p->max_ports);
2689 opo = (struct ofp_packet_out *) oh;
2691 COVERAGE_INC(ofproto_packet_out);
2692 if (opo->buffer_id != htonl(UINT32_MAX)) {
2693 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
2695 if (error || !buffer) {
2703 flow_extract(&payload, 0, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
2704 error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
2705 &flow, p, &payload, &actions, NULL, NULL, NULL);
2710 dpif_execute(p->dpif, flow.in_port, actions.actions, actions.n_actions,
2712 ofpbuf_delete(buffer);
2718 update_port_config(struct ofproto *p, struct ofport *port,
2719 uint32_t config, uint32_t mask)
2721 mask &= config ^ port->opp.config;
2722 if (mask & OFPPC_PORT_DOWN) {
2723 if (config & OFPPC_PORT_DOWN) {
2724 netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
2726 netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
2729 #define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
2730 if (mask & REVALIDATE_BITS) {
2731 COVERAGE_INC(ofproto_costly_flags);
2732 port->opp.config ^= mask & REVALIDATE_BITS;
2733 p->need_revalidate = true;
2735 #undef REVALIDATE_BITS
2736 if (mask & OFPPC_NO_FLOOD) {
2737 port->opp.config ^= OFPPC_NO_FLOOD;
2738 refresh_port_groups(p);
2740 if (mask & OFPPC_NO_PACKET_IN) {
2741 port->opp.config ^= OFPPC_NO_PACKET_IN;
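/* The first line of update_port_config() narrows 'mask' to the bits that
 * would actually change: 'config ^ port->opp.config' has a 1 exactly where
 * the requested and current values differ, so XOR-ing the masked bits back
 * in flips just those.  A hedged worked example with made-up values: */
#if 0 /* example only */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
    uint32_t current = 0x5;       /* hypothetical current config bits */
    uint32_t config  = 0x3;       /* requested values */
    uint32_t mask    = 0x7;       /* bits the controller asked to change */

    mask &= config ^ current;     /* 0x7 & 0x6 = 0x6: only differing bits */
    current ^= mask;              /* 0x5 ^ 0x6 = 0x3: now matches request */
    assert(current == config);
    return 0;
}
#endif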
2746 handle_port_mod(struct ofproto *p, struct ofconn *ofconn,
2747 struct ofp_header *oh)
2749 const struct ofp_port_mod *opm;
2750 struct ofport *port;
2753 error = reject_slave_controller(ofconn, oh);
2757 error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm);
2761 opm = (struct ofp_port_mod *) oh;
2763 port = port_array_get(&p->ports,
2764 ofp_port_to_odp_port(ntohs(opm->port_no)));
2766 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
2767 } else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
2768 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
2770 update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
2771 if (opm->advertise) {
2772 netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
2778 static struct ofpbuf *
2779 make_stats_reply(uint32_t xid, uint16_t type, size_t body_len)
2781 struct ofp_stats_reply *osr;
2784 msg = ofpbuf_new(MIN(sizeof *osr + body_len, UINT16_MAX));
2785 osr = put_openflow_xid(sizeof *osr, OFPT_STATS_REPLY, xid, msg);
2787 osr->flags = htons(0);
2791 static struct ofpbuf *
2792 start_stats_reply(const struct ofp_stats_request *request, size_t body_len)
2794 return make_stats_reply(request->header.xid, request->type, body_len);
2798 append_stats_reply(size_t nbytes, struct ofconn *ofconn, struct ofpbuf **msgp)
2800 struct ofpbuf *msg = *msgp;
2801 assert(nbytes <= UINT16_MAX - sizeof(struct ofp_stats_reply));
2802 if (nbytes + msg->size > UINT16_MAX) {
2803 struct ofp_stats_reply *reply = msg->data;
2804 reply->flags = htons(OFPSF_REPLY_MORE);
2805 *msgp = make_stats_reply(reply->header.xid, reply->type, nbytes);
2806 queue_tx(msg, ofconn, ofconn->reply_counter);
2808 return ofpbuf_put_uninit(*msgp, nbytes);
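/* A hedged usage sketch of the helper above: callers append fixed-size
 * records in a loop and let append_stats_reply() split the reply whenever
 * the next record would overflow the 16-bit OpenFlow length.  ('request',
 * 'ofconn', and the record type below are hypothetical.) */
#if 0 /* example only */
struct example_stat { uint8_t bytes[64]; };  /* made-up fixed-size record */
struct ofpbuf *msg;
int i;

msg = start_stats_reply(request, sizeof(struct example_stat));
for (i = 0; i < 10000; i++) {
    /* May queue 'msg' with OFPSF_REPLY_MORE and start a fresh buffer. */
    struct example_stat *rec = append_stats_reply(sizeof *rec, ofconn, &msg);
    memset(rec, 0, sizeof *rec);
}
queue_tx(msg, ofconn, ofconn->reply_counter);   /* final part, flags == 0 */
#endif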
2812 handle_desc_stats_request(struct ofproto *p, struct ofconn *ofconn,
2813 struct ofp_stats_request *request)
2815 struct ofp_desc_stats *ods;
2818 msg = start_stats_reply(request, sizeof *ods);
2819 ods = append_stats_reply(sizeof *ods, ofconn, &msg);
2820 memset(ods, 0, sizeof *ods);
2821 ovs_strlcpy(ods->mfr_desc, p->mfr_desc, sizeof ods->mfr_desc);
2822 ovs_strlcpy(ods->hw_desc, p->hw_desc, sizeof ods->hw_desc);
2823 ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc);
2824 ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num);
2825 ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc);
2826 queue_tx(msg, ofconn, ofconn->reply_counter);
2832 count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
2834 struct rule *rule = rule_from_cls_rule(cls_rule);
2835 int *n_subrules = n_subrules_;
2843 handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn,
2844 struct ofp_stats_request *request)
2846 struct ofp_table_stats *ots;
2848 struct odp_stats dpstats;
2849 int n_exact, n_subrules, n_wild;
2851 msg = start_stats_reply(request, sizeof *ots * 2);
2853 /* Count rules of various kinds. */
2855 classifier_for_each(&p->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
2856 n_exact = classifier_count_exact(&p->cls) - n_subrules;
2857 n_wild = classifier_count(&p->cls) - classifier_count_exact(&p->cls);
2860 dpif_get_dp_stats(p->dpif, &dpstats);
2861 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2862 memset(ots, 0, sizeof *ots);
2863 ots->table_id = TABLEID_HASH;
2864 strcpy(ots->name, "hash");
2865 ots->wildcards = htonl(0);
2866 ots->max_entries = htonl(dpstats.max_capacity);
2867 ots->active_count = htonl(n_exact);
2868 ots->lookup_count = htonll(dpstats.n_frags + dpstats.n_hit +
2870 ots->matched_count = htonll(dpstats.n_hit); /* XXX */
2872 /* Classifier table. */
2873 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2874 memset(ots, 0, sizeof *ots);
2875 ots->table_id = TABLEID_CLASSIFIER;
2876 strcpy(ots->name, "classifier");
2877 ots->wildcards = p->tun_id_from_cookie ? htonl(OVSFW_ALL) : htonl(OFPFW_ALL);
2879 ots->max_entries = htonl(65536);
2880 ots->active_count = htonl(n_wild);
2881 ots->lookup_count = htonll(0); /* XXX */
2882 ots->matched_count = htonll(0); /* XXX */
2884 queue_tx(msg, ofconn, ofconn->reply_counter);
2889 append_port_stat(struct ofport *port, uint16_t port_no, struct ofconn *ofconn,
2890 struct ofpbuf **msgp)
2892 struct netdev_stats stats;
2893 struct ofp_port_stats *ops;
2895 /* Intentionally ignore return value, since errors will set
2896 * 'stats' to all-1s, which is correct for OpenFlow, and
2897 * netdev_get_stats() will log errors. */
2898 netdev_get_stats(port->netdev, &stats);
2900 ops = append_stats_reply(sizeof *ops, ofconn, msgp);
2901 ops->port_no = htons(odp_port_to_ofp_port(port_no));
2902 memset(ops->pad, 0, sizeof ops->pad);
2903 ops->rx_packets = htonll(stats.rx_packets);
2904 ops->tx_packets = htonll(stats.tx_packets);
2905 ops->rx_bytes = htonll(stats.rx_bytes);
2906 ops->tx_bytes = htonll(stats.tx_bytes);
2907 ops->rx_dropped = htonll(stats.rx_dropped);
2908 ops->tx_dropped = htonll(stats.tx_dropped);
2909 ops->rx_errors = htonll(stats.rx_errors);
2910 ops->tx_errors = htonll(stats.tx_errors);
2911 ops->rx_frame_err = htonll(stats.rx_frame_errors);
2912 ops->rx_over_err = htonll(stats.rx_over_errors);
2913 ops->rx_crc_err = htonll(stats.rx_crc_errors);
2914 ops->collisions = htonll(stats.collisions);
2918 handle_port_stats_request(struct ofproto *p, struct ofconn *ofconn,
2919 struct ofp_stats_request *osr,
2922 struct ofp_port_stats_request *psr;
2923 struct ofp_port_stats *ops;
2925 struct ofport *port;
2926 unsigned int port_no;
2928 if (arg_size != sizeof *psr) {
2929 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
2931 psr = (struct ofp_port_stats_request *) osr->body;
2933 msg = start_stats_reply(osr, sizeof *ops * 16);
2934 if (psr->port_no != htons(OFPP_NONE)) {
2935 port = port_array_get(&p->ports,
2936 ofp_port_to_odp_port(ntohs(psr->port_no)));
2938 append_port_stat(port, ntohs(psr->port_no), ofconn, &msg);
2941 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
2942 append_port_stat(port, port_no, ofconn, &msg);
2946 queue_tx(msg, ofconn, ofconn->reply_counter);
2950 struct flow_stats_cbdata {
2951 struct ofproto *ofproto;
2952 struct ofconn *ofconn;
2957 /* Obtains statistics for 'rule' within 'p' and stores them into
2958 * '*packet_countp' and '*byte_countp'. If 'rule' is a wildcarded rule, the
2959 * returned statistics include statistics for all of 'rule''s subrules. */
2961 query_stats(struct ofproto *p, struct rule *rule,
2962 uint64_t *packet_countp, uint64_t *byte_countp)
2964 uint64_t packet_count, byte_count;
2965 struct rule *subrule;
2966 struct odp_flow *odp_flows;
2969 /* Start from historical data for 'rule' itself that are no longer tracked
2970 * by the datapath. This counts, for example, subrules that have expired. */
2972 packet_count = rule->packet_count;
2973 byte_count = rule->byte_count;
2975 /* Prepare to ask the datapath for statistics on 'rule', or if it is
2976 * wildcarded then on all of its subrules.
2978 * Also, add any statistics that are not tracked by the datapath for each
2979 * subrule. This includes, for example, statistics for packets that were
2980 * executed "by hand" by ofproto via dpif_execute() but must be accounted to the rule. */
2982 n_odp_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1;
2983 odp_flows = xzalloc(n_odp_flows * sizeof *odp_flows);
2984 if (rule->cr.wc.wildcards) {
2986 LIST_FOR_EACH (subrule, struct rule, list, &rule->list) {
2987 odp_flows[i++].key = subrule->cr.flow;
2988 packet_count += subrule->packet_count;
2989 byte_count += subrule->byte_count;
2992 odp_flows[0].key = rule->cr.flow;
2995 /* Fetch up-to-date statistics from the datapath and add them in. */
2996 if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) {
2998 for (i = 0; i < n_odp_flows; i++) {
2999 struct odp_flow *odp_flow = &odp_flows[i];
3000 packet_count += odp_flow->stats.n_packets;
3001 byte_count += odp_flow->stats.n_bytes;
3006 /* Return the stats to the caller. */
3007 *packet_countp = packet_count;
3008 *byte_countp = byte_count;
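/* Worked example (made-up numbers): a wildcarded rule with 10 packets of
 * retired history and two subrules carrying 3 and 4 historical packets,
 * whose datapath flows currently report 2 and 1 packets, yields
 *
 *     10 + (3 + 4) + (2 + 1) = 20
 *
 * packets from query_stats(): rule history, plus per-subrule history, plus
 * live datapath counters. */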
3012 flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
3014 struct rule *rule = rule_from_cls_rule(rule_);
3015 struct flow_stats_cbdata *cbdata = cbdata_;
3016 struct ofp_flow_stats *ofs;
3017 uint64_t packet_count, byte_count;
3018 size_t act_len, len;
3019 long long int tdiff = time_msec() - rule->created;
3020 uint32_t sec = tdiff / 1000;
3021 uint32_t msec = tdiff - (sec * 1000);
3023 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
3027 act_len = sizeof *rule->actions * rule->n_actions;
3028 len = offsetof(struct ofp_flow_stats, actions) + act_len;
3030 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
3032 ofs = append_stats_reply(len, cbdata->ofconn, &cbdata->msg);
3033 ofs->length = htons(len);
3034 ofs->table_id = rule->cr.wc.wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
3036 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
3037 cbdata->ofproto->tun_id_from_cookie, &ofs->match);
3038 ofs->duration_sec = htonl(sec);
3039 ofs->duration_nsec = htonl(msec * 1000000);
3040 ofs->cookie = rule->flow_cookie;
3041 ofs->priority = htons(rule->cr.priority);
3042 ofs->idle_timeout = htons(rule->idle_timeout);
3043 ofs->hard_timeout = htons(rule->hard_timeout);
3044 memset(ofs->pad2, 0, sizeof ofs->pad2);
3045 ofs->packet_count = htonll(packet_count);
3046 ofs->byte_count = htonll(byte_count);
3047 memcpy(ofs->actions, rule->actions, act_len);
3051 table_id_to_include(uint8_t table_id)
3053 return (table_id == TABLEID_HASH ? CLS_INC_EXACT
3054 : table_id == TABLEID_CLASSIFIER ? CLS_INC_WILD
3055 : table_id == 0xff ? CLS_INC_ALL
3056 : 0);
3060 handle_flow_stats_request(struct ofproto *p, struct ofconn *ofconn,
3061 const struct ofp_stats_request *osr,
3064 struct ofp_flow_stats_request *fsr;
3065 struct flow_stats_cbdata cbdata;
3066 struct cls_rule target;
3068 if (arg_size != sizeof *fsr) {
3069 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3071 fsr = (struct ofp_flow_stats_request *) osr->body;
3073 COVERAGE_INC(ofproto_flows_req);
3075 cbdata.ofconn = ofconn;
3076 cbdata.out_port = fsr->out_port;
3077 cbdata.msg = start_stats_reply(osr, 1024);
3078 cls_rule_from_match(&fsr->match, 0, false, 0, &target);
3079 classifier_for_each_match(&p->cls, &target,
3080 table_id_to_include(fsr->table_id),
3081 flow_stats_cb, &cbdata);
3082 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3086 struct flow_stats_ds_cbdata {
3087 struct ofproto *ofproto;
3092 flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
3094 struct rule *rule = rule_from_cls_rule(rule_);
3095 struct flow_stats_ds_cbdata *cbdata = cbdata_;
3096 struct ds *results = cbdata->results;
3097 struct ofp_match match;
3098 uint64_t packet_count, byte_count;
3099 size_t act_len = sizeof *rule->actions * rule->n_actions;
3101 /* Don't report on subrules. */
3102 if (rule->super != NULL) {
3106 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
3107 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
3108 cbdata->ofproto->tun_id_from_cookie, &match);
3110 ds_put_format(results, "duration=%llds, ",
3111 (time_msec() - rule->created) / 1000);
3112 ds_put_format(results, "priority=%u, ", rule->cr.priority);
3113 ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
3114 ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
3115 ofp_print_match(results, &match, true);
3116 ofp_print_actions(results, &rule->actions->header, act_len);
3117 ds_put_cstr(results, "\n");
3120 /* Adds a pretty-printed description of all flows to 'results', including
3121 * those marked hidden by secchan (e.g., by in-band control). */
3123 ofproto_get_all_flows(struct ofproto *p, struct ds *results)
3125 struct ofp_match match;
3126 struct cls_rule target;
3127 struct flow_stats_ds_cbdata cbdata;
3129 memset(&match, 0, sizeof match);
3130 match.wildcards = htonl(OVSFW_ALL);
3133 cbdata.results = results;
3135 cls_rule_from_match(&match, 0, false, 0, &target);
3136 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
3137 flow_stats_ds_cb, &cbdata);
3140 struct aggregate_stats_cbdata {
3141 struct ofproto *ofproto;
3143 uint64_t packet_count;
3144 uint64_t byte_count;
3149 aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
3151 struct rule *rule = rule_from_cls_rule(rule_);
3152 struct aggregate_stats_cbdata *cbdata = cbdata_;
3153 uint64_t packet_count, byte_count;
3155 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
3159 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
3161 cbdata->packet_count += packet_count;
3162 cbdata->byte_count += byte_count;
3167 handle_aggregate_stats_request(struct ofproto *p, struct ofconn *ofconn,
3168 const struct ofp_stats_request *osr,
3171 struct ofp_aggregate_stats_request *asr;
3172 struct ofp_aggregate_stats_reply *reply;
3173 struct aggregate_stats_cbdata cbdata;
3174 struct cls_rule target;
3177 if (arg_size != sizeof *asr) {
3178 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3180 asr = (struct ofp_aggregate_stats_request *) osr->body;
3182 COVERAGE_INC(ofproto_agg_request);
3184 cbdata.out_port = asr->out_port;
3185 cbdata.packet_count = 0;
3186 cbdata.byte_count = 0;
3188 cls_rule_from_match(&asr->match, 0, false, 0, &target);
3189 classifier_for_each_match(&p->cls, &target,
3190 table_id_to_include(asr->table_id),
3191 aggregate_stats_cb, &cbdata);
3193 msg = start_stats_reply(osr, sizeof *reply);
3194 reply = append_stats_reply(sizeof *reply, ofconn, &msg);
3195 reply->flow_count = htonl(cbdata.n_flows);
3196 reply->packet_count = htonll(cbdata.packet_count);
3197 reply->byte_count = htonll(cbdata.byte_count);
3198 queue_tx(msg, ofconn, ofconn->reply_counter);
3202 struct queue_stats_cbdata {
3203 struct ofconn *ofconn;
3209 put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id,
3210 const struct netdev_queue_stats *stats)
3212 struct ofp_queue_stats *reply;
3214 reply = append_stats_reply(sizeof *reply, cbdata->ofconn, &cbdata->msg);
3215 reply->port_no = htons(cbdata->port_no);
3216 memset(reply->pad, 0, sizeof reply->pad);
3217 reply->queue_id = htonl(queue_id);
3218 reply->tx_bytes = htonll(stats->tx_bytes);
3219 reply->tx_packets = htonll(stats->tx_packets);
3220 reply->tx_errors = htonll(stats->tx_errors);
3224 handle_queue_stats_dump_cb(uint32_t queue_id,
3225 struct netdev_queue_stats *stats,
3228 struct queue_stats_cbdata *cbdata = cbdata_;
3230 put_queue_stats(cbdata, queue_id, stats);
3234 handle_queue_stats_for_port(struct ofport *port, uint16_t port_no,
3236 struct queue_stats_cbdata *cbdata)
3238 cbdata->port_no = port_no;
3239 if (queue_id == OFPQ_ALL) {
3240 netdev_dump_queue_stats(port->netdev,
3241 handle_queue_stats_dump_cb, cbdata);
3243 struct netdev_queue_stats stats;
3245 netdev_get_queue_stats(port->netdev, queue_id, &stats);
3246 put_queue_stats(cbdata, queue_id, &stats);
3251 handle_queue_stats_request(struct ofproto *ofproto, struct ofconn *ofconn,
3252 const struct ofp_stats_request *osr,
3255 struct ofp_queue_stats_request *qsr;
3256 struct queue_stats_cbdata cbdata;
3257 struct ofport *port;
3258 unsigned int port_no;
3261 if (arg_size != sizeof *qsr) {
3262 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3264 qsr = (struct ofp_queue_stats_request *) osr->body;
3266 COVERAGE_INC(ofproto_queue_req);
3268 cbdata.ofconn = ofconn;
3269 cbdata.msg = start_stats_reply(osr, 128);
3271 port_no = ntohs(qsr->port_no);
3272 queue_id = ntohl(qsr->queue_id);
3273 if (port_no == OFPP_ALL) {
3274 PORT_ARRAY_FOR_EACH (port, &ofproto->ports, port_no) {
3275 handle_queue_stats_for_port(port, port_no, queue_id, &cbdata);
3277 } else if (port_no < ofproto->max_ports) {
3278 port = port_array_get(&ofproto->ports, port_no);
3280 handle_queue_stats_for_port(port, port_no, queue_id, &cbdata);
3283 ofpbuf_delete(cbdata.msg);
3284 return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT);
3286 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3292 handle_stats_request(struct ofproto *p, struct ofconn *ofconn,
3293 struct ofp_header *oh)
3295 struct ofp_stats_request *osr;
3299 error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
3304 osr = (struct ofp_stats_request *) oh;
3306 switch (ntohs(osr->type)) {
3308 return handle_desc_stats_request(p, ofconn, osr);
3311 return handle_flow_stats_request(p, ofconn, osr, arg_size);
3313 case OFPST_AGGREGATE:
3314 return handle_aggregate_stats_request(p, ofconn, osr, arg_size);
3317 return handle_table_stats_request(p, ofconn, osr);
3320 return handle_port_stats_request(p, ofconn, osr, arg_size);
3323 return handle_queue_stats_request(p, ofconn, osr, arg_size);
3326 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3329 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
3333 static long long int
3334 msec_from_nsec(uint64_t sec, uint32_t nsec)
3336 return !sec ? 0 : sec * 1000 + nsec / 1000000;
3340 update_time(struct ofproto *ofproto, struct rule *rule,
3341 const struct odp_flow_stats *stats)
3343 long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
3344 if (used > rule->used) {
3346 if (rule->super && used > rule->super->used) {
3347 rule->super->used = used;
3349 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
3354 update_stats(struct ofproto *ofproto, struct rule *rule,
3355 const struct odp_flow_stats *stats)
3357 if (stats->n_packets) {
3358 update_time(ofproto, rule, stats);
3359 rule->packet_count += stats->n_packets;
3360 rule->byte_count += stats->n_bytes;
3361 netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
3366 /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
3367 * in which no matching flow already exists in the flow table.
3369 * Adds the flow specified by 'ofm', which is followed by 'n_actions'
3370 * ofp_actions, to 'p''s flow table. Returns 0 on success or an OpenFlow error
3371 * code as encoded by ofp_mkerr() on failure.
3373 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id, if any. */
3376 add_flow(struct ofproto *p, struct ofconn *ofconn,
3377 const struct ofp_flow_mod *ofm, size_t n_actions)
3379 struct ofpbuf *packet;
3384 if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)) {
3388 flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie,
3390 if (classifier_rule_overlaps(&p->cls, &flow, wildcards,
3391 ntohs(ofm->priority))) {
3392 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
3396 rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
3397 n_actions, ntohs(ofm->idle_timeout),
3398 ntohs(ofm->hard_timeout), ofm->cookie,
3399 ofm->flags & htons(OFPFF_SEND_FLOW_REM));
3400 cls_rule_from_match(&ofm->match, ntohs(ofm->priority),
3401 p->tun_id_from_cookie, ofm->cookie, &rule->cr);
3404 if (ofm->buffer_id != htonl(UINT32_MAX)) {
3405 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
3409 in_port = UINT16_MAX;
3412 rule_insert(p, rule, packet, in_port);
3413 ofpbuf_delete(packet);
3417 static struct rule *
3418 find_flow_strict(struct ofproto *p, const struct ofp_flow_mod *ofm)
3423 flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie,
3425 return rule_from_cls_rule(classifier_find_rule_exactly(
3426 &p->cls, &flow, wildcards,
3427 ntohs(ofm->priority)));
3431 send_buffered_packet(struct ofproto *ofproto, struct ofconn *ofconn,
3432 struct rule *rule, const struct ofp_flow_mod *ofm)
3434 struct ofpbuf *packet;
3439 if (ofm->buffer_id == htonl(UINT32_MAX)) {
3443 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
3449 flow_extract(packet, 0, in_port, &flow);
3450 rule_execute(ofproto, rule, packet, &flow);
3451 ofpbuf_delete(packet);
3456 /* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */
3458 struct modify_flows_cbdata {
3459 struct ofproto *ofproto;
3460 const struct ofp_flow_mod *ofm;
3465 static int modify_flow(struct ofproto *, const struct ofp_flow_mod *,
3466 size_t n_actions, struct rule *);
3467 static void modify_flows_cb(struct cls_rule *, void *cbdata_);
3469 /* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code as
3470 * encoded by ofp_mkerr() on failure.
3472 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id, if any. */
3475 modify_flows_loose(struct ofproto *p, struct ofconn *ofconn,
3476 const struct ofp_flow_mod *ofm, size_t n_actions)
3478 struct modify_flows_cbdata cbdata;
3479 struct cls_rule target;
3483 cbdata.n_actions = n_actions;
3484 cbdata.match = NULL;
3486 cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
3489 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
3490 modify_flows_cb, &cbdata);
3492 /* This credits the packet to whichever flow happened to
3493 * match last. That's weird. Maybe we should do a lookup for the
3494 * flow that actually matches the packet? Who knows. */
3495 send_buffered_packet(p, ofconn, cbdata.match, ofm);
3498 return add_flow(p, ofconn, ofm, n_actions);
3502 /* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error
3503 * code as encoded by ofp_mkerr() on failure.
3505 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id, if any. */
3508 modify_flow_strict(struct ofproto *p, struct ofconn *ofconn,
3509 struct ofp_flow_mod *ofm, size_t n_actions)
3511 struct rule *rule = find_flow_strict(p, ofm);
3512 if (rule && !rule_is_hidden(rule)) {
3513 modify_flow(p, ofm, n_actions, rule);
3514 return send_buffered_packet(p, ofconn, rule, ofm);
3516 return add_flow(p, ofconn, ofm, n_actions);
3520 /* Callback for modify_flows_loose(). */
3522 modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
3524 struct rule *rule = rule_from_cls_rule(rule_);
3525 struct modify_flows_cbdata *cbdata = cbdata_;
3527 if (!rule_is_hidden(rule)) {
3528 cbdata->match = rule;
3529 modify_flow(cbdata->ofproto, cbdata->ofm, cbdata->n_actions, rule);
3533 /* Implements core of OFPFC_MODIFY and OFPFC_MODIFY_STRICT where 'rule' has
3534 * been identified as a flow in 'p''s flow table to be modified, by changing
3535 * the rule's actions to match those in 'ofm' (which is followed by 'n_actions'
3536 * ofp_action[] structures). */
3538 modify_flow(struct ofproto *p, const struct ofp_flow_mod *ofm,
3539 size_t n_actions, struct rule *rule)
3541 size_t actions_len = n_actions * sizeof *rule->actions;
3543 rule->flow_cookie = ofm->cookie;
3545 /* If the actions are the same, do nothing. */
3546 if (n_actions == rule->n_actions
3547 && !memcmp(ofm->actions, rule->actions, actions_len))
3552 /* Replace actions. */
3553 free(rule->actions);
3554 rule->actions = xmemdup(ofm->actions, actions_len);
3555 rule->n_actions = n_actions;
3557 /* Make sure that the datapath gets updated properly. */
3558 if (rule->cr.wc.wildcards) {
3559 COVERAGE_INC(ofproto_mod_wc_flow);
3560 p->need_revalidate = true;
3562 rule_update_actions(p, rule);
3568 /* OFPFC_DELETE implementation. */
3570 struct delete_flows_cbdata {
3571 struct ofproto *ofproto;
3575 static void delete_flows_cb(struct cls_rule *, void *cbdata_);
3576 static void delete_flow(struct ofproto *, struct rule *, uint16_t out_port);
3578 /* Implements OFPFC_DELETE. */
3580 delete_flows_loose(struct ofproto *p, const struct ofp_flow_mod *ofm)
3582 struct delete_flows_cbdata cbdata;
3583 struct cls_rule target;
3586 cbdata.out_port = ofm->out_port;
3588 cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
3591 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
3592 delete_flows_cb, &cbdata);
3595 /* Implements OFPFC_DELETE_STRICT. */
3597 delete_flow_strict(struct ofproto *p, struct ofp_flow_mod *ofm)
3599 struct rule *rule = find_flow_strict(p, ofm);
3601 delete_flow(p, rule, ofm->out_port);
3605 /* Callback for delete_flows_loose(). */
3607 delete_flows_cb(struct cls_rule *rule_, void *cbdata_)
3609 struct rule *rule = rule_from_cls_rule(rule_);
3610 struct delete_flows_cbdata *cbdata = cbdata_;
3612 delete_flow(cbdata->ofproto, rule, cbdata->out_port);
3615 /* Implements core of OFPFC_DELETE and OFPFC_DELETE_STRICT where 'rule' has
3616 * been identified as a flow to delete from 'p''s flow table, by deleting the
3617 * flow and sending out an OFPT_FLOW_REMOVED message to any interested
3620 * Will not delete 'rule' if it is hidden. Will delete 'rule' only if
3621 * 'out_port' is htons(OFPP_NONE) or if 'rule' actually outputs to the
3622 * specified 'out_port'. */
3624 delete_flow(struct ofproto *p, struct rule *rule, uint16_t out_port)
3626 if (rule_is_hidden(rule)) {
3630 if (out_port != htons(OFPP_NONE) && !rule_has_out_port(rule, out_port)) {
3634 send_flow_removed(p, rule, time_msec(), OFPRR_DELETE);
3635 rule_remove(p, rule);
3639 handle_flow_mod(struct ofproto *p, struct ofconn *ofconn,
3640 struct ofp_flow_mod *ofm)
3642 struct ofp_match orig_match;
3646 error = reject_slave_controller(ofconn, &ofm->header);
3650 error = check_ofp_message_array(&ofm->header, OFPT_FLOW_MOD, sizeof *ofm,
3651 sizeof *ofm->actions, &n_actions);
3656 /* We do not support the emergency flow cache. It will hopefully
3657 * get dropped from OpenFlow in the near future. */
3658 if (ofm->flags & htons(OFPFF_EMERG)) {
3659 /* There isn't a good fit for an error code, so just state that the
3660 * flow table is full. */
3661 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
3664 /* Normalize ofm->match. If normalization actually changes anything, then
3665 * log the differences. */
3666 ofm->match.pad1[0] = ofm->match.pad2[0] = 0;
3667 orig_match = ofm->match;
3668 normalize_match(&ofm->match);
3669 if (memcmp(&ofm->match, &orig_match, sizeof orig_match)) {
3670 static struct vlog_rate_limit normal_rl = VLOG_RATE_LIMIT_INIT(1, 1);
3671 if (!VLOG_DROP_INFO(&normal_rl)) {
3672 char *old = ofp_match_to_literal_string(&orig_match);
3673 char *new = ofp_match_to_literal_string(&ofm->match);
3674 VLOG_INFO("%s: normalization changed ofp_match, details:",
3675 rconn_get_name(ofconn->rconn));
3676 VLOG_INFO(" pre: %s", old);
3677 VLOG_INFO("post: %s", new);
3683 if (!ofm->match.wildcards) {
3684 ofm->priority = htons(UINT16_MAX);
3687 error = validate_actions((const union ofp_action *) ofm->actions,
3688 n_actions, p->max_ports);
3693 switch (ntohs(ofm->command)) {
3695 return add_flow(p, ofconn, ofm, n_actions);
3698 return modify_flows_loose(p, ofconn, ofm, n_actions);
3700 case OFPFC_MODIFY_STRICT:
3701 return modify_flow_strict(p, ofconn, ofm, n_actions);
3704 delete_flows_loose(p, ofm);
3707 case OFPFC_DELETE_STRICT:
3708 delete_flow_strict(p, ofm);
3712 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND);
3717 handle_tun_id_from_cookie(struct ofproto *p, struct nxt_tun_id_cookie *msg)
3721 error = check_ofp_message(&msg->header, OFPT_VENDOR, sizeof *msg);
3726 p->tun_id_from_cookie = !!msg->set;
3731 handle_role_request(struct ofproto *ofproto,
3732 struct ofconn *ofconn, struct nicira_header *msg)
3734 struct nx_role_request *nrr;
3735 struct nx_role_request *reply;
3739 if (ntohs(msg->header.length) != sizeof *nrr) {
3740 VLOG_WARN_RL(&rl, "received role request of length %u (expected %zu)",
3741 ntohs(msg->header.length), sizeof *nrr);
3742 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3744 nrr = (struct nx_role_request *) msg;
3746 if (ofconn->type != OFCONN_CONTROLLER) {
3747 VLOG_WARN_RL(&rl, "ignoring role request on non-controller "
3749 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
3752 role = ntohl(nrr->role);
3753 if (role != NX_ROLE_OTHER && role != NX_ROLE_MASTER
3754 && role != NX_ROLE_SLAVE) {
3755 VLOG_WARN_RL(&rl, "received request for unknown role %"PRIu32, role);
3757 /* There's no good error code for this. */
3758 return ofp_mkerr(OFPET_BAD_REQUEST, -1);
3761 if (role == NX_ROLE_MASTER) {
3762 struct ofconn *other;
3764 HMAP_FOR_EACH (other, struct ofconn, hmap_node,
3765 &ofproto->controllers) {
3766 if (other->role == NX_ROLE_MASTER) {
3767 other->role = NX_ROLE_SLAVE;
3771 ofconn->role = role;
3773 reply = make_openflow_xid(sizeof *reply, OFPT_VENDOR, msg->header.xid,
3775 reply->nxh.vendor = htonl(NX_VENDOR_ID);
3776 reply->nxh.subtype = htonl(NXT_ROLE_REPLY);
3777 reply->role = htonl(role);
3778 queue_tx(buf, ofconn, ofconn->reply_counter);
3784 handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg)
3786 struct ofp_vendor_header *ovh = msg;
3787 struct nicira_header *nh;
3789 if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
3790 VLOG_WARN_RL(&rl, "received vendor message of length %u "
3791 "(expected at least %zu)",
3792 ntohs(ovh->header.length), sizeof(struct ofp_vendor_header));
3793 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3795 if (ovh->vendor != htonl(NX_VENDOR_ID)) {
3796 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3798 if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
3799 VLOG_WARN_RL(&rl, "received Nicira vendor message of length %u "
3800 "(expected at least %zu)",
3801 ntohs(ovh->header.length), sizeof(struct nicira_header));
3802 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3806 switch (ntohl(nh->subtype)) {
3807 case NXT_STATUS_REQUEST:
3808 return switch_status_handle_request(p->switch_status, ofconn->rconn,
3811 case NXT_TUN_ID_FROM_COOKIE:
3812 return handle_tun_id_from_cookie(p, msg);
3814 case NXT_ROLE_REQUEST:
3815 return handle_role_request(p, ofconn, msg);
3818 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3822 handle_barrier_request(struct ofconn *ofconn, struct ofp_header *oh)
3824 struct ofp_header *ob;
3827 /* Currently, everything executes synchronously, so we can just
3828 * immediately send the barrier reply. */
3829 ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf);
3830 queue_tx(buf, ofconn, ofconn->reply_counter);
3835 handle_openflow(struct ofconn *ofconn, struct ofproto *p,
3836 struct ofpbuf *ofp_msg)
3838 struct ofp_header *oh = ofp_msg->data;
3841 COVERAGE_INC(ofproto_recv_openflow);
3843 case OFPT_ECHO_REQUEST:
3844 error = handle_echo_request(ofconn, oh);
3847 case OFPT_ECHO_REPLY:
3851 case OFPT_FEATURES_REQUEST:
3852 error = handle_features_request(p, ofconn, oh);
3855 case OFPT_GET_CONFIG_REQUEST:
3856 error = handle_get_config_request(p, ofconn, oh);
3859 case OFPT_SET_CONFIG:
3860 error = handle_set_config(p, ofconn, ofp_msg->data);
3863 case OFPT_PACKET_OUT:
3864 error = handle_packet_out(p, ofconn, ofp_msg->data);
3868 error = handle_port_mod(p, ofconn, oh);
3872 error = handle_flow_mod(p, ofconn, ofp_msg->data);
3875 case OFPT_STATS_REQUEST:
3876 error = handle_stats_request(p, ofconn, oh);
3880 error = handle_vendor(p, ofconn, ofp_msg->data);
3883 case OFPT_BARRIER_REQUEST:
3884 error = handle_barrier_request(ofconn, oh);
3888 if (VLOG_IS_WARN_ENABLED()) {
3889 char *s = ofp_to_string(oh, ntohs(oh->length), 2);
3890 VLOG_WARN_RL(&rl, "OpenFlow message ignored: %s", s);
3893 error = ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE);
3898 send_error_oh(ofconn, ofp_msg->data, error);
3903 handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet)
3905 struct odp_msg *msg = packet->data;
3907 struct ofpbuf payload;
3910 payload.data = msg + 1;
3911 payload.size = msg->length - sizeof *msg;
3912 flow_extract(&payload, msg->arg, msg->port, &flow);
3914 /* Check with in-band control to see if this packet should be sent
3915 * to the local port regardless of the flow table. */
3916 if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
3917 union odp_action action;
3919 memset(&action, 0, sizeof(action));
3920 action.output.type = ODPAT_OUTPUT;
3921 action.output.port = ODPP_LOCAL;
3922 dpif_execute(p->dpif, flow.in_port, &action, 1, &payload);
3925 rule = lookup_valid_rule(p, &flow);
3927 /* Don't send a packet-in if OFPPC_NO_PACKET_IN is asserted. */
3928 struct ofport *port = port_array_get(&p->ports, msg->port);
3930 if (port->opp.config & OFPPC_NO_PACKET_IN) {
3931 COVERAGE_INC(ofproto_no_packet_in);
3932 /* XXX install 'drop' flow entry */
3933 ofpbuf_delete(packet);
3937 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, msg->port);
3940 COVERAGE_INC(ofproto_packet_in);
3941 send_packet_in(p, packet);
3945 if (rule->cr.wc.wildcards) {
3946 rule = rule_create_subrule(p, rule, &flow);
3947 rule_make_actions(p, rule, packet);
3949 if (!rule->may_install) {
3950 /* The rule is not installable, that is, we need to process every
3951 * packet, so process the current packet and set its actions into 'subrule'. */
3953 rule_make_actions(p, rule, packet);
3955 /* XXX revalidate rule if it needs it */
3959 rule_execute(p, rule, &payload, &flow);
3960 rule_reinstall(p, rule);
3962 if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY) {
3964 * Extra-special case for fail-open mode.
3966 * We are in fail-open mode and the packet matched the fail-open rule,
3967 * but we are connected to a controller too. We should send the packet
3968 * up to the controller in the hope that it will try to set up a flow
3969 * and thereby allow us to exit fail-open.
3971 * See the top-level comment in fail-open.c for more information.
3973 send_packet_in(p, packet);
3975 ofpbuf_delete(packet);
3980 handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
3982 struct odp_msg *msg = packet->data;
3984 switch (msg->type) {
3985 case _ODPL_ACTION_NR:
3986 COVERAGE_INC(ofproto_ctlr_action);
3987 send_packet_in(p, packet);
3990 case _ODPL_SFLOW_NR:
3992 ofproto_sflow_received(p->sflow, msg);
3994 ofpbuf_delete(packet);
3998 handle_odp_miss_msg(p, packet);
4002 VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
4009 revalidate_cb(struct cls_rule *sub_, void *cbdata_)
4011 struct rule *sub = rule_from_cls_rule(sub_);
4012 struct revalidate_cbdata *cbdata = cbdata_;
4014 if (cbdata->revalidate_all
4015 || (cbdata->revalidate_subrules && sub->super)
4016 || (tag_set_intersects(&cbdata->revalidate_set, sub->tags))) {
4017 revalidate_rule(cbdata->ofproto, sub);
4022 revalidate_rule(struct ofproto *p, struct rule *rule)
4024 const flow_t *flow = &rule->cr.flow;
4026 COVERAGE_INC(ofproto_revalidate_rule);
4029 super = rule_from_cls_rule(classifier_lookup_wild(&p->cls, flow));
4031 rule_remove(p, rule);
4033 } else if (super != rule->super) {
4034 COVERAGE_INC(ofproto_revalidate_moved);
4035 list_remove(&rule->list);
4036 list_push_back(&super->list, &rule->list);
4037 rule->super = super;
4038 rule->hard_timeout = super->hard_timeout;
4039 rule->idle_timeout = super->idle_timeout;
4040 rule->created = super->created;
4045 rule_update_actions(p, rule);
4049 static struct ofpbuf *
4050 compose_flow_removed(struct ofproto *p, const struct rule *rule,
4051 long long int now, uint8_t reason)
4053 struct ofp_flow_removed *ofr;
4055 long long int tdiff = now - rule->created;
4056 uint32_t sec = tdiff / 1000;
4057 uint32_t msec = tdiff - (sec * 1000);
4059 ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
4060 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, p->tun_id_from_cookie,
4062 ofr->cookie = rule->flow_cookie;
4063 ofr->priority = htons(rule->cr.priority);
4064 ofr->reason = reason;
4065 ofr->duration_sec = htonl(sec);
4066 ofr->duration_nsec = htonl(msec * 1000000);
4067 ofr->idle_timeout = htons(rule->idle_timeout);
4068 ofr->packet_count = htonll(rule->packet_count);
4069 ofr->byte_count = htonll(rule->byte_count);
4075 uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule)
4077 assert(rule->installed);
4078 assert(!rule->cr.wc.wildcards);
4081 rule_remove(ofproto, rule);
4083 rule_uninstall(ofproto, rule);
4088 send_flow_removed(struct ofproto *p, struct rule *rule,
4089 long long int now, uint8_t reason)
4091 struct ofconn *ofconn;
4092 struct ofconn *prev;
4093 struct ofpbuf *buf = NULL;
4095 /* We limit the maximum number of queued flow expirations by accounting
4096 * them under the counter for replies. That works because preventing
4097 * OpenFlow requests from being processed also prevents new flows from
4098 * being added (and expiring). (It also prevents processing OpenFlow
4099 * requests that would not add new flows, so it is imperfect.) */
4102 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
4103 if (rule->send_flow_removed && rconn_is_connected(ofconn->rconn)
4104 && ofconn_receives_async_msgs(ofconn)) {
4106 queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
4108 buf = compose_flow_removed(p, rule, now, reason);
4114 queue_tx(buf, prev, prev->reply_counter);
4120 expire_rule(struct cls_rule *cls_rule, void *p_)
4122 struct ofproto *p = p_;
4123 struct rule *rule = rule_from_cls_rule(cls_rule);
4124 long long int hard_expire, idle_expire, expire, now;
4126 hard_expire = (rule->hard_timeout
4127 ? rule->created + rule->hard_timeout * 1000
4129 idle_expire = (rule->idle_timeout
4130 && (rule->super || list_is_empty(&rule->list))
4131 ? rule->used + rule->idle_timeout * 1000
4133 expire = MIN(hard_expire, idle_expire);
4137 if (rule->installed && now >= rule->used + 5000) {
4138 uninstall_idle_flow(p, rule);
4139 } else if (!rule->cr.wc.wildcards) {
4140 active_timeout(p, rule);
4146 COVERAGE_INC(ofproto_expired);
4148 /* Update stats. This code will be a no-op if the rule expired
4149 * due to an idle timeout. */
4150 if (rule->cr.wc.wildcards) {
4151 struct rule *subrule, *next;
4152 LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
4153 rule_remove(p, subrule);
4156 rule_uninstall(p, rule);
4159 if (!rule_is_hidden(rule)) {
4160 send_flow_removed(p, rule, now,
4162 ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
4164 rule_remove(p, rule);
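/* A minimal standalone model of the expiration arithmetic in expire_rule()
 * above, assuming (as the surrounding code suggests) that a disabled
 * timeout is represented by LLONG_MAX so that taking the minimum ignores
 * it.  All numbers are made up: */
#if 0 /* example only */
#include <assert.h>
#include <limits.h>

int
main(void)
{
    long long created = 1000, used = 4000, now = 10000;   /* msec */
    int hard_timeout = 0, idle_timeout = 5;               /* sec; 0 = off */

    long long hard_expire = hard_timeout ? created + hard_timeout * 1000LL
                                         : LLONG_MAX;
    long long idle_expire = idle_timeout ? used + idle_timeout * 1000LL
                                         : LLONG_MAX;
    long long expire = hard_expire < idle_expire ? hard_expire : idle_expire;

    assert(expire == 9000);      /* idle deadline: 4000 + 5 * 1000 */
    assert(now >= expire);       /* so this rule would be expired */
    return 0;
}
#endif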
4168 active_timeout(struct ofproto *ofproto, struct rule *rule)
4170 if (ofproto->netflow && !is_controller_rule(rule) &&
4171 netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
4172 struct ofexpired expired;
4173 struct odp_flow odp_flow;
4175 /* Get updated flow stats. */
4176 memset(&odp_flow, 0, sizeof odp_flow);
4177 if (rule->installed) {
4178 odp_flow.key = rule->cr.flow;
4179 odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
4180 dpif_flow_get(ofproto->dpif, &odp_flow);
4182 if (odp_flow.stats.n_packets) {
4183 update_time(ofproto, rule, &odp_flow.stats);
4184 netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos,
4185 odp_flow.stats.tcp_flags);
4189 expired.flow = rule->cr.flow;
4190 expired.packet_count = rule->packet_count +
4191 odp_flow.stats.n_packets;
4192 expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
4193 expired.used = rule->used;
4195 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
4197 /* Schedule us to send the accumulated records once we have
4198 * collected all of them. */
4199 poll_immediate_wake();
4204 update_used(struct ofproto *p)
4206 struct odp_flow *flows;
4211 error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
4216 for (i = 0; i < n_flows; i++) {
4217 struct odp_flow *f = &flows[i];
4220 rule = rule_from_cls_rule(
4221 classifier_find_rule_exactly(&p->cls, &f->key, 0, UINT16_MAX));
4222 if (!rule || !rule->installed) {
4223 COVERAGE_INC(ofproto_unexpected_rule);
4224 dpif_flow_del(p->dpif, f);
4228 update_time(p, rule, &f->stats);
4229 rule_account(p, rule, f->stats.n_bytes);
4234 /* pinsched callback for sending 'packet' on 'ofconn'. */
4236 do_send_packet_in(struct ofpbuf *packet, void *ofconn_)
4238 struct ofconn *ofconn = ofconn_;
4240 rconn_send_with_limit(ofconn->rconn, packet,
4241 ofconn->packet_in_counter, 100);
4244 /* Takes 'packet', which has been converted with do_convert_to_packet_in(),
4245 * finalizes its content for sending on 'ofconn', and passes it to 'ofconn''s
4246 * packet scheduler for sending.
4248 * 'max_len' specifies the maximum number of bytes of the packet to send on
4249 * 'ofconn' (INT_MAX specifies no limit).
4251 * If 'clone' is true, the caller retains ownership of 'packet'. Otherwise,
4252 * ownership is transferred to this function. */
4254 schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, int max_len,
4257 struct ofproto *ofproto = ofconn->ofproto;
4258 struct ofp_packet_in *opi = packet->data;
4259 uint16_t in_port = ofp_port_to_odp_port(ntohs(opi->in_port));
4260 int send_len, trim_size;
4264 if (opi->reason == OFPR_ACTION) {
4265 buffer_id = UINT32_MAX;
4266 } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) {
4267 buffer_id = pktbuf_get_null();
4268 } else if (!ofconn->pktbuf) {
4269 buffer_id = UINT32_MAX;
4271 struct ofpbuf payload;
4272 payload.data = opi->data;
4273 payload.size = packet->size - offsetof(struct ofp_packet_in, data);
4274 buffer_id = pktbuf_save(ofconn->pktbuf, &payload, in_port);
4277 /* Figure out how much of the packet to send. */
4278 send_len = ntohs(opi->total_len);
4279 if (buffer_id != UINT32_MAX) {
4280 send_len = MIN(send_len, ofconn->miss_send_len);
4282 send_len = MIN(send_len, max_len);
4284 /* Adjust packet length and clone if necessary. */
4285 trim_size = offsetof(struct ofp_packet_in, data) + send_len;
4287 packet = ofpbuf_clone_data(packet->data, trim_size);
4290 packet->size = trim_size;
4293 /* Update packet headers. */
4294 opi->buffer_id = htonl(buffer_id);
4295 update_openflow_length(packet);
4297 /* Hand over to packet scheduler. It might immediately call into
4298 * do_send_packet_in() or it might buffer it for a while (until a later
4299 * call to pinsched_run()). */
4300 pinsched_send(ofconn->schedulers[opi->reason], in_port,
4301 packet, do_send_packet_in, ofconn);
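/* Worked example of the length logic above, with made-up numbers: a
 * 1500-byte packet that was buffered (buffer_id != UINT32_MAX) on a
 * connection whose miss_send_len is 128, with no caller-imposed limit
 * (max_len == INT_MAX), is trimmed to
 *
 *     send_len = MIN(MIN(1500, 128), INT_MAX) = 128
 *
 * bytes of payload, plus the fixed ofp_packet_in header. */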
4304 /* Replace struct odp_msg header in 'packet' by equivalent struct
4305 * ofp_packet_in. The odp_msg must have sufficient headroom to do so (e.g. as
4306 * returned by dpif_recv()).
4308 * The conversion is not complete: the caller still needs to trim any unneeded
4309 * payload off the end of the buffer, set the length in the OpenFlow header,
4310 * and set buffer_id. Those require us to know the controller settings and so
4311 * must be done on a per-controller basis.
4313 * Returns the maximum number of bytes of the packet that should be sent to
4314 * the controller (INT_MAX if no limit). */
4316 do_convert_to_packet_in(struct ofpbuf *packet)
4318 struct odp_msg *msg = packet->data;
4319 struct ofp_packet_in *opi;
4325 /* Extract relevant header fields */
4326 if (msg->type == _ODPL_ACTION_NR) {
4327 reason = OFPR_ACTION;
4330 reason = OFPR_NO_MATCH;
4333 total_len = msg->length - sizeof *msg;
4334 in_port = odp_port_to_ofp_port(msg->port);
4336 /* Repurpose packet buffer by overwriting header. */
4337 ofpbuf_pull(packet, sizeof(struct odp_msg));
4338 opi = ofpbuf_push_zeros(packet, offsetof(struct ofp_packet_in, data));
4339 opi->header.version = OFP_VERSION;
4340 opi->header.type = OFPT_PACKET_IN;
4341 opi->total_len = htons(total_len);
4342 opi->in_port = htons(in_port);
4343 opi->reason = reason;
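/* A hedged sketch of the buffer surgery above, assuming ofpbuf_pull()
 * advances the data pointer (consuming the kernel's odp_msg header) and
 * ofpbuf_push_zeros() prepends zeroed bytes into the headroom that frees
 * up, so the payload is never copied.  Modeled with a plain array and
 * made-up header sizes: */
#if 0 /* example only */
#include <assert.h>
#include <string.h>

int
main(void)
{
    char buf[64];                 /* backing store */
    char *data = buf;             /* starts at a made-up 16-byte header */
    size_t size = 48;             /* 16-byte header + 32-byte payload */

    /* "ofpbuf_pull": consume the kernel header; payload now leads. */
    data += 16; size -= 16;

    /* "ofpbuf_push_zeros": reuse 12 of those bytes for a made-up
     * OpenFlow header, prepended in place. */
    data -= 12; size += 12;
    memset(data, 0, 12);

    assert(data == buf + 4 && size == 44);   /* payload untouched */
    return 0;
}
#endif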
4348 /* Given 'packet' containing an odp_msg of type _ODPL_ACTION_NR or
4349 * _ODPL_MISS_NR, sends an OFPT_PACKET_IN message to each OpenFlow controller
4350 * as necessary according to their individual configurations.
4352 * 'packet' must have sufficient headroom to convert it into a struct
4353 * ofp_packet_in (e.g. as returned by dpif_recv()).
4355 * Takes ownership of 'packet'. */
4357 send_packet_in(struct ofproto *ofproto, struct ofpbuf *packet)
4359 struct ofconn *ofconn, *prev;
4362 max_len = do_convert_to_packet_in(packet);
4365 LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
4366 if (ofconn_receives_async_msgs(ofconn)) {
4368 schedule_packet_in(prev, packet, max_len, true);
4374 schedule_packet_in(prev, packet, max_len, false);
4376 ofpbuf_delete(packet);
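/* The loop above uses a common fan-out idiom: remember one recipient in
 * 'prev' and clone the buffer for every recipient except the last, so the
 * final send can hand over the original without a copy.  A hedged sketch
 * of the bare pattern; the types and helpers here are hypothetical: */
#if 0 /* example only */
struct recipient { struct recipient *next; bool wants_async; };

static void
fan_out(struct recipient *first, struct msg *msg)
{
    struct recipient *r, *prev = NULL;

    for (r = first; r; r = r->next) {
        if (r->wants_async) {
            if (prev) {
                send_one(prev, clone_msg(msg)); /* not the last: copy */
            }
            prev = r;
        }
    }
    if (prev) {
        send_one(prev, msg);        /* last recipient takes ownership */
    } else {
        free_msg(msg);              /* no takers: drop it */
    }
}
#endif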
4381 pick_datapath_id(const struct ofproto *ofproto)
4383 const struct ofport *port;
4385 port = port_array_get(&ofproto->ports, ODPP_LOCAL);
4387 uint8_t ea[ETH_ADDR_LEN];
4390 error = netdev_get_etheraddr(port->netdev, ea);
4392 return eth_addr_to_uint64(ea);
4394 VLOG_WARN("could not get MAC address for %s (%s)",
4395 netdev_get_name(port->netdev), strerror(error));
4397 return ofproto->fallback_dpid;
4401 pick_fallback_dpid(void)
4403 uint8_t ea[ETH_ADDR_LEN];
4404 eth_addr_nicira_random(ea);
4405 return eth_addr_to_uint64(ea);
4409 default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
4410 struct odp_actions *actions, tag_type *tags,
4411 uint16_t *nf_output_iface, void *ofproto_)
4413 struct ofproto *ofproto = ofproto_;
4416 /* Drop frames for reserved multicast addresses. */
4417 if (eth_addr_is_reserved(flow->dl_dst)) {
4421 /* Learn source MAC (but don't try to learn from revalidation). */
4422 if (packet != NULL) {
4423 tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
4425 GRAT_ARP_LOCK_NONE);
4427 /* The log messages here could actually be useful in debugging,
4428 * so keep the rate limit relatively high. */
4429 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
4430 VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
4431 ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
4432 ofproto_revalidate(ofproto, rev_tag);
4436 /* Determine output port. */
4437 out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags,
4440 add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface);
4441 } else if (out_port != flow->in_port) {
4442 odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
4443 *nf_output_iface = out_port;
4451 static const struct ofhooks default_ofhooks = {
4453 default_normal_ofhook_cb,