/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 * Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/socket.h>
#include <netinet/in.h>

#include "byte-order.h"
#include "classifier.h"
#include "discovery.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "mac-learning.h"
#include "ofp-print.h"
#include "ofproto-sflow.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openvswitch/datapath-protocol.h"
#include "poll-loop.h"
#include "stream-ssl.h"

VLOG_DEFINE_THIS_MODULE(ofproto);

#include "sflow_api.h"

struct ofport {
    struct hmap_node hmap_node; /* In struct ofproto's "ports" hmap. */
    struct netdev *netdev;
    struct ofp_phy_port opp;    /* In host byte order. */
    uint16_t odp_port;
};

static void ofport_free(struct ofport *);
static void hton_ofp_phy_port(struct ofp_phy_port *);

static int xlate_actions(const union ofp_action *in, size_t n_in,
                         const struct flow *, struct ofproto *,
                         const struct ofpbuf *packet,
                         struct odp_actions *out, tag_type *tags,
                         bool *may_set_up_flow, uint16_t *nf_output_iface);

/* An OpenFlow flow within a "struct ofproto". */
struct rule {
    struct cls_rule cr;

    ovs_be64 flow_cookie;       /* Controller-issued identifier. */
    uint16_t idle_timeout;      /* In seconds from time of last use. */
    uint16_t hard_timeout;      /* In seconds from time of creation. */
    bool send_flow_removed;     /* Send a flow removed message? */
    long long int used;         /* Time last used; time created if not used. */
    long long int created;      /* Creation time. */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */
    uint64_t accounted_bytes;   /* Number of bytes passed to account_cb. */
    tag_type tags;              /* Tags (set only by hooks). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */

    /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
     * exact-match rule (having cr.wc.wildcards of 0) generated from the
     * wildcard rule 'super'.  In this case, 'list' is an element of the
     * super-rule's list of subrules.
     *
     * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
     * a list of subrules.  A super-rule with no wildcards (where
     * cr.wc.wildcards is 0) will never have any subrules. */
    struct rule *super;
    struct list list;

    /* OpenFlow actions.
     *
     * 'n_actions' is the number of elements in the 'actions' array.  A single
     * action may take up more than one element's worth of space.
     *
     * A subrule has no actions (it uses the super-rule's actions). */
    int n_actions;
    union ofp_action *actions;

    /* Datapath actions.
     *
     * A super-rule with wildcard fields never has ODP actions (since the
     * datapath only supports exact-match flows). */
    bool installed;             /* Installed in datapath? */
    bool may_install;           /* True ordinarily; false if actions must
                                 * be reassessed for every packet. */
    int n_odp_actions;
    union odp_action *odp_actions;
};

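/* Illustration of the super/subrule relationship (descriptive only): a
 * wildcarded OpenFlow rule such as "in_port=1, dl_dst=*" is a super-rule.
 * When a packet on port 1 first matches it, ofproto creates an exact-match
 * subrule for that packet's flow with 'super' pointing at the wildcarded
 * rule, and only the subrule is installed in the datapath. */
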
static inline bool
rule_is_hidden(const struct rule *rule)
{
    /* Subrules are merely an implementation detail, so hide them from the
     * controller. */
    if (rule->super != NULL) {
        return true;
    }

    /* Rules with priority higher than UINT16_MAX are set up by ofproto itself
     * (e.g. by in-band control) and are intentionally hidden from the
     * controller. */
    if (rule->cr.priority > UINT16_MAX) {
        return true;
    }

    return false;
}

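/* Example (descriptive only): in-band control installs its flows with
 * priorities above UINT16_MAX, so they can never collide with, or be listed
 * or deleted by, OpenFlow controller requests, which are limited to the
 * 16-bit 0...65535 priority range. */
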
static struct rule *rule_create(struct ofproto *, struct rule *super,
                                const union ofp_action *, size_t n_actions,
                                uint16_t idle_timeout, uint16_t hard_timeout,
                                ovs_be64 flow_cookie, bool send_flow_removed);
static void rule_free(struct rule *);
static void rule_destroy(struct ofproto *, struct rule *);
static struct rule *rule_from_cls_rule(const struct cls_rule *);
static void rule_insert(struct ofproto *, struct rule *,
                        struct ofpbuf *packet, uint16_t in_port);
static void rule_remove(struct ofproto *, struct rule *);
static bool rule_make_actions(struct ofproto *, struct rule *,
                              const struct ofpbuf *packet);
static void rule_install(struct ofproto *, struct rule *,
                         struct rule *displaced_rule);
static void rule_uninstall(struct ofproto *, struct rule *);
static void rule_post_uninstall(struct ofproto *, struct rule *);
static void send_flow_removed(struct ofproto *, struct rule *, uint8_t reason);

/* ofproto supports two kinds of OpenFlow connections:
 *
 *   - "Primary" connections to ordinary OpenFlow controllers.  ofproto
 *     maintains persistent connections to these controllers and by default
 *     sends them asynchronous messages such as packet-ins.
 *
 *   - "Service" connections, e.g. from ovs-ofctl.  When these connections
 *     drop, it is the other side's responsibility to reconnect them if
 *     necessary.  ofproto does not send them asynchronous messages by default.
 *
 * Currently, active (tcp, ssl, unix) connections are always "primary"
 * connections and passive (ptcp, pssl, punix) connections are always "service"
 * connections.  There is no inherent reason for this, but it reflects the
 * common case. */
enum ofconn_type {
    OFCONN_PRIMARY,             /* An ordinary OpenFlow controller. */
    OFCONN_SERVICE              /* A service connection, e.g. "ovs-ofctl". */
};

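/* Example (following the comment above): a target of "tcp:10.0.0.1:6633"
 * yields an active connection and therefore an OFCONN_PRIMARY ofconn, while
 * "ptcp:6633" makes ofproto listen, and each connection it accepts becomes
 * an OFCONN_SERVICE ofconn. */
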
/* A listener for incoming OpenFlow "service" connections. */
struct ofservice {
    struct hmap_node node;      /* In struct ofproto's "services" hmap. */
    struct pvconn *pvconn;      /* OpenFlow connection listener. */

    /* These are not used by ofservice directly.  They are settings for
     * accepted "struct ofconn"s from the pvconn. */
    int probe_interval;         /* Max idle time before probing, in seconds. */
    int rate_limit;             /* Max packet-in rate in packets per second. */
    int burst_limit;            /* Limit on accumulating packet credits. */
};

static struct ofservice *ofservice_lookup(struct ofproto *,
                                          const char *target);
static int ofservice_create(struct ofproto *,
                            const struct ofproto_controller *);
static void ofservice_reconfigure(struct ofservice *,
                                  const struct ofproto_controller *);
static void ofservice_destroy(struct ofproto *, struct ofservice *);

/* An OpenFlow connection. */
struct ofconn {
    struct ofproto *ofproto;    /* The ofproto that owns this connection. */
    struct list node;           /* In struct ofproto's "all_conns" list. */
    struct rconn *rconn;        /* OpenFlow connection. */
    enum ofconn_type type;      /* Type. */
    int flow_format;            /* One of NXFF_*. */

    /* OFPT_PACKET_IN related data. */
    struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
    struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */
    struct pktbuf *pktbuf;         /* OpenFlow packet buffers. */
    int miss_send_len;             /* Bytes to send of buffered packets. */

    /* Number of OpenFlow messages queued on 'rconn' as replies to OpenFlow
     * requests, and the maximum number before we stop reading OpenFlow
     * requests. */
#define OFCONN_REPLY_MAX 100
    struct rconn_packet_counter *reply_counter;

    /* type == OFCONN_PRIMARY only. */
    enum nx_role role;           /* Role. */
    struct hmap_node hmap_node;  /* In struct ofproto's "controllers" map. */
    struct discovery *discovery; /* Controller discovery object, if enabled. */
    struct status_category *ss;  /* Switch status category. */
    enum ofproto_band band;      /* In-band or out-of-band? */
};

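/* Backpressure note (descriptive only): ofconn_run() and ofconn_wait() stop
 * reading OpenFlow requests once
 *
 *     rconn_packet_counter_read(ofconn->reply_counter) >= OFCONN_REPLY_MAX
 *
 * so a controller that stops reading its replies cannot make ofproto queue
 * an unbounded number of them. */
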
/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's
 * "schedulers" array.  Their values are 0 and 1, and their meanings and values
 * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient.  In
 * case anything ever changes, check their values here. */
#define N_SCHEDULERS 2
BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0);
BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR);
BUILD_ASSERT_DECL(OFPR_ACTION == 1);
BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR);

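/* Illustrative sketch (not code from this file): because the values coincide,
 * a packet-in reason taken from an odp_msg's type (_ODPL_MISS_NR or
 * _ODPL_ACTION_NR) can index the scheduler array directly, along the lines of
 *
 *     struct pinsched *s = ofconn->schedulers[reason];
 *
 * The BUILD_ASSERT_DECLs above catch any future drift between the two sets
 * of constants. */
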
static struct ofconn *ofconn_create(struct ofproto *, struct rconn *,
                                    enum ofconn_type);
static void ofconn_destroy(struct ofconn *);
static void ofconn_run(struct ofconn *);
static void ofconn_wait(struct ofconn *);
static bool ofconn_receives_async_msgs(const struct ofconn *);
static char *ofconn_make_name(const struct ofproto *, const char *target);
static void ofconn_set_rate_limit(struct ofconn *, int rate, int burst);

static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
                     struct rconn_packet_counter *counter);

static void send_packet_in(struct ofproto *, struct ofpbuf *odp_msg);
static void do_send_packet_in(struct ofpbuf *odp_msg, void *ofconn);

struct ofproto {
    /* Settings. */
    uint64_t datapath_id;       /* Datapath ID. */
    uint64_t fallback_dpid;     /* Datapath ID if no better choice found. */
    char *mfr_desc;             /* Manufacturer. */
    char *hw_desc;              /* Hardware. */
    char *sw_desc;              /* Software version. */
    char *serial_desc;          /* Serial number. */
    char *dp_desc;              /* Datapath description. */

    /* Datapath. */
    struct dpif *dpif;
    struct netdev_monitor *netdev_monitor;
    struct hmap ports;          /* Contains "struct ofport"s. */
    struct shash port_by_name;
    uint32_t max_ports;

    /* Configuration. */
    struct switch_status *switch_status;
    struct fail_open *fail_open;
    struct netflow *netflow;
    struct ofproto_sflow *sflow;

    /* In-band control. */
    struct in_band *in_band;
    long long int next_in_band_update;
    struct sockaddr_in *extra_in_band_remotes;
    size_t n_extra_remotes;

    /* Flow table. */
    struct classifier cls;
    bool need_revalidate;
    long long int next_expiration;
    struct tag_set revalidate_set;

    /* OpenFlow connections. */
    struct hmap controllers;    /* Controller "struct ofconn"s. */
    struct list all_conns;      /* Contains "struct ofconn"s. */
    enum ofproto_fail_mode fail_mode;

    /* OpenFlow listeners. */
    struct hmap services;       /* Contains "struct ofservice"s. */
    struct pvconn **snoops;
    size_t n_snoops;

    /* Hooks for ovs-vswitchd. */
    const struct ofhooks *ofhooks;
    void *aux;

    /* Used by default ofhooks. */
    struct mac_learning *ml;
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static const struct ofhooks default_ofhooks;

static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);

static int ofproto_expire(struct ofproto *);

static void update_stats(struct ofproto *, struct rule *,
                         const struct odp_flow_stats *);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);

static void handle_odp_msg(struct ofproto *, struct ofpbuf *);

static void handle_openflow(struct ofconn *, struct ofpbuf *);

static struct ofport *get_port(const struct ofproto *, uint16_t odp_port);
static void update_port(struct ofproto *, const char *devname);
static int init_ports(struct ofproto *);
static void reinit_ports(struct ofproto *);

int
ofproto_create(const char *datapath, const char *datapath_type,
               const struct ofhooks *ofhooks, void *aux,
               struct ofproto **ofprotop)
{
    struct odp_stats stats;
    struct ofproto *p;
    struct dpif *dpif;
    int error;

    *ofprotop = NULL;

    /* Connect to datapath and start listening for messages. */
    error = dpif_open(datapath, datapath_type, &dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
        return error;
    }
    error = dpif_get_dp_stats(dpif, &stats);
    if (error) {
        VLOG_ERR("failed to obtain stats for datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    dpif_flow_flush(dpif);
    dpif_recv_purge(dpif);

    /* Initialize settings. */
    p = xzalloc(sizeof *p);
    p->fallback_dpid = pick_fallback_dpid();
    p->datapath_id = p->fallback_dpid;
    p->mfr_desc = xstrdup(DEFAULT_MFR_DESC);
    p->hw_desc = xstrdup(DEFAULT_HW_DESC);
    p->sw_desc = xstrdup(DEFAULT_SW_DESC);
    p->serial_desc = xstrdup(DEFAULT_SERIAL_DESC);
    p->dp_desc = xstrdup(DEFAULT_DP_DESC);

    /* Initialize datapath. */
    p->dpif = dpif;
    p->netdev_monitor = netdev_monitor_create();
    hmap_init(&p->ports);
    shash_init(&p->port_by_name);
    p->max_ports = stats.max_ports;

    /* Initialize submodules. */
    p->switch_status = switch_status_create(p);

    /* Initialize flow table. */
    classifier_init(&p->cls);
    p->need_revalidate = false;
    p->next_expiration = time_msec() + 1000;
    tag_set_init(&p->revalidate_set);

    /* Initialize OpenFlow connections. */
    list_init(&p->all_conns);
    hmap_init(&p->controllers);
    hmap_init(&p->services);

    /* Initialize hooks. */
    if (ofhooks) {
        p->ofhooks = ofhooks;
        p->aux = aux;
        p->ml = NULL;
    } else {
        p->ofhooks = &default_ofhooks;
        p->aux = p;
        p->ml = mac_learning_create();
    }

    /* Pick final datapath ID. */
    p->datapath_id = pick_datapath_id(p);
    VLOG_INFO("using datapath ID %016"PRIx64, p->datapath_id);

    *ofprotop = p;
    return 0;
}

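/* A minimal usage sketch (assumptions: a datapath named "br0" of type
 * "system"; not code from this file):
 *
 *     struct ofproto *p;
 *     if (!ofproto_create("br0", "system", NULL, NULL, &p)) {
 *         for (;;) {
 *             ofproto_run(p);
 *             ofproto_wait(p);
 *             poll_block();
 *         }
 *     }
 */
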
void
ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
{
    uint64_t old_dpid = p->datapath_id;
    p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
    if (p->datapath_id != old_dpid) {
        VLOG_INFO("datapath ID changed to %016"PRIx64, p->datapath_id);

        /* Force all active connections to reconnect, since there is no way to
         * notify a controller that the datapath ID has changed. */
        ofproto_reconnect_controllers(p);
    }
}

static bool
is_discovery_controller(const struct ofproto_controller *c)
{
    return !strcmp(c->target, "discover");
}

static bool
is_in_band_controller(const struct ofproto_controller *c)
{
    return is_discovery_controller(c) || c->band == OFPROTO_IN_BAND;
}

/* Creates a new controller in 'ofproto'.  Some of the settings are initially
 * drawn from 'c', but update_controller() needs to be called later to finish
 * the new ofconn's configuration. */
static void
add_controller(struct ofproto *ofproto, const struct ofproto_controller *c)
{
    struct discovery *discovery;
    struct ofconn *ofconn;

    if (is_discovery_controller(c)) {
        int error = discovery_create(c->accept_re, c->update_resolv_conf,
                                     ofproto->dpif, ofproto->switch_status,
                                     &discovery);
        if (error) {
            return;
        }
    } else {
        discovery = NULL;
    }

    ofconn = ofconn_create(ofproto, rconn_create(5, 8), OFCONN_PRIMARY);
    ofconn->pktbuf = pktbuf_create();
    ofconn->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
    if (discovery) {
        ofconn->discovery = discovery;
    } else {
        char *name = ofconn_make_name(ofproto, c->target);
        rconn_connect(ofconn->rconn, c->target, name);
        free(name);
    }

    hmap_insert(&ofproto->controllers, &ofconn->hmap_node,
                hash_string(c->target, 0));
}

/* Reconfigures 'ofconn' to match 'c'.  This function cannot update an ofconn's
 * target or turn discovery on or off (these are done by creating new ofconns
 * and deleting old ones), but it can update the rest of an ofconn's
 * settings. */
static void
update_controller(struct ofconn *ofconn, const struct ofproto_controller *c)
{
    int probe_interval;

    ofconn->band = (is_in_band_controller(c)
                    ? OFPROTO_IN_BAND : OFPROTO_OUT_OF_BAND);

    rconn_set_max_backoff(ofconn->rconn, c->max_backoff);

    probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0;
    rconn_set_probe_interval(ofconn->rconn, probe_interval);

    if (ofconn->discovery) {
        discovery_set_update_resolv_conf(ofconn->discovery,
                                         c->update_resolv_conf);
        discovery_set_accept_controller_re(ofconn->discovery, c->accept_re);
    }

    ofconn_set_rate_limit(ofconn, c->rate_limit, c->burst_limit);
}

static const char *
ofconn_get_target(const struct ofconn *ofconn)
{
    return ofconn->discovery ? "discover" : rconn_get_target(ofconn->rconn);
}

static struct ofconn *
find_controller_by_target(struct ofproto *ofproto, const char *target)
{
    struct ofconn *ofconn;

    HMAP_FOR_EACH_WITH_HASH (ofconn, hmap_node,
                             hash_string(target, 0), &ofproto->controllers) {
        if (!strcmp(ofconn_get_target(ofconn), target)) {
            return ofconn;
        }
    }
    return NULL;
}

static void
update_in_band_remotes(struct ofproto *ofproto)
{
    const struct ofconn *ofconn;
    struct sockaddr_in *addrs;
    size_t max_addrs, n_addrs;
    bool discovery;
    size_t i;

    /* Allocate enough memory for as many remotes as we could possibly have. */
    max_addrs = ofproto->n_extra_remotes + hmap_count(&ofproto->controllers);
    addrs = xmalloc(max_addrs * sizeof *addrs);
    n_addrs = 0;

    /* Add all the remotes. */
    discovery = false;
    HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) {
        struct sockaddr_in *sin = &addrs[n_addrs];

        if (ofconn->band == OFPROTO_OUT_OF_BAND) {
            continue;
        }

        sin->sin_addr.s_addr = rconn_get_remote_ip(ofconn->rconn);
        if (sin->sin_addr.s_addr) {
            sin->sin_port = rconn_get_remote_port(ofconn->rconn);
            n_addrs++;
        }
        if (ofconn->discovery) {
            discovery = true;
        }
    }
    for (i = 0; i < ofproto->n_extra_remotes; i++) {
        addrs[n_addrs++] = ofproto->extra_in_band_remotes[i];
    }

    /* Create or update or destroy in-band.
     *
     * Ordinarily we only enable in-band if there's at least one remote
     * address, but discovery needs the in-band rules for DHCP to be installed
     * even before we know any remote addresses. */
    if (n_addrs || discovery) {
        if (!ofproto->in_band) {
            in_band_create(ofproto, ofproto->dpif, ofproto->switch_status,
                           &ofproto->in_band);
        }
        if (ofproto->in_band) {
            in_band_set_remotes(ofproto->in_band, addrs, n_addrs);
        }
        ofproto->next_in_band_update = time_msec() + 1000;
    } else {
        in_band_destroy(ofproto->in_band);
        ofproto->in_band = NULL;
    }

    free(addrs);
}

static void
update_fail_open(struct ofproto *p)
{
    struct ofconn *ofconn;

    if (!hmap_is_empty(&p->controllers)
        && p->fail_mode == OFPROTO_FAIL_STANDALONE) {
        struct rconn **rconns;
        size_t n;

        if (!p->fail_open) {
            p->fail_open = fail_open_create(p, p->switch_status);
        }

        n = 0;
        rconns = xmalloc(hmap_count(&p->controllers) * sizeof *rconns);
        HMAP_FOR_EACH (ofconn, hmap_node, &p->controllers) {
            rconns[n++] = ofconn->rconn;
        }

        fail_open_set_controllers(p->fail_open, rconns, n);
        /* p->fail_open takes ownership of 'rconns'. */
    } else {
        fail_open_destroy(p->fail_open);
        p->fail_open = NULL;
    }
}

void
ofproto_set_controllers(struct ofproto *p,
                        const struct ofproto_controller *controllers,
                        size_t n_controllers)
{
    struct shash new_controllers;
    struct ofconn *ofconn, *next_ofconn;
    struct ofservice *ofservice, *next_ofservice;
    bool ss_exists;
    size_t i;

    /* Create newly configured controllers and services.
     * Create a name to ofproto_controller mapping in 'new_controllers'. */
    shash_init(&new_controllers);
    for (i = 0; i < n_controllers; i++) {
        const struct ofproto_controller *c = &controllers[i];

        if (!vconn_verify_name(c->target) || !strcmp(c->target, "discover")) {
            if (!find_controller_by_target(p, c->target)) {
                add_controller(p, c);
            }
        } else if (!pvconn_verify_name(c->target)) {
            if (!ofservice_lookup(p, c->target) && ofservice_create(p, c)) {
                continue;
            }
        } else {
            VLOG_WARN_RL(&rl, "%s: unsupported controller \"%s\"",
                         dpif_name(p->dpif), c->target);
            continue;
        }

        shash_add_once(&new_controllers, c->target, &controllers[i]);
    }

    /* Delete controllers that are no longer configured.
     * Update configuration of all now-existing controllers. */
    ss_exists = false;
    HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &p->controllers) {
        struct ofproto_controller *c;

        c = shash_find_data(&new_controllers, ofconn_get_target(ofconn));
        if (!c) {
            ofconn_destroy(ofconn);
        } else {
            update_controller(ofconn, c);
            if (ofconn->ss) {
                ss_exists = true;
            }
        }
    }

    /* Delete services that are no longer configured.
     * Update configuration of all now-existing services. */
    HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &p->services) {
        struct ofproto_controller *c;

        c = shash_find_data(&new_controllers,
                            pvconn_get_name(ofservice->pvconn));
        if (!c) {
            ofservice_destroy(p, ofservice);
        } else {
            ofservice_reconfigure(ofservice, c);
        }
    }

    shash_destroy(&new_controllers);

    update_in_band_remotes(p);
    update_fail_open(p);

    if (!hmap_is_empty(&p->controllers) && !ss_exists) {
        ofconn = CONTAINER_OF(hmap_first(&p->controllers),
                              struct ofconn, hmap_node);
        ofconn->ss = switch_status_register(p->switch_status, "remote",
                                            rconn_status_cb, ofconn->rconn);
    }
}

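/* Hypothetical configuration sketch (member names beyond 'target' and 'band'
 * elided; not code from this file): one primary controller plus one service
 * listener might be configured as
 *
 *     struct ofproto_controller c[2];
 *     memset(c, 0, sizeof c);
 *     c[0].target = "tcp:10.0.0.1:6633";
 *     c[0].band = OFPROTO_IN_BAND;
 *     c[1].target = "punix:/var/run/br0.mgmt";
 *     ofproto_set_controllers(p, c, 2);
 */
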
void
ofproto_set_fail_mode(struct ofproto *p, enum ofproto_fail_mode fail_mode)
{
    p->fail_mode = fail_mode;
    update_fail_open(p);
}

/* Drops the connections between 'ofproto' and all of its controllers, forcing
 * them to reconnect. */
void
ofproto_reconnect_controllers(struct ofproto *ofproto)
{
    struct ofconn *ofconn;

    LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
        rconn_reconnect(ofconn->rconn);
    }
}

static bool
any_extras_changed(const struct ofproto *ofproto,
                   const struct sockaddr_in *extras, size_t n)
{
    size_t i;

    if (n != ofproto->n_extra_remotes) {
        return true;
    }

    for (i = 0; i < n; i++) {
        const struct sockaddr_in *old = &ofproto->extra_in_band_remotes[i];
        const struct sockaddr_in *new = &extras[i];

        if (old->sin_addr.s_addr != new->sin_addr.s_addr ||
            old->sin_port != new->sin_port) {
            return true;
        }
    }

    return false;
}

/* Sets the 'n' TCP port addresses in 'extras' as ones to which 'ofproto''s
 * in-band control should guarantee access, in the same way that in-band
 * control guarantees access to OpenFlow controllers. */
void
ofproto_set_extra_in_band_remotes(struct ofproto *ofproto,
                                  const struct sockaddr_in *extras, size_t n)
{
    if (!any_extras_changed(ofproto, extras, n)) {
        return;
    }

    free(ofproto->extra_in_band_remotes);
    ofproto->n_extra_remotes = n;
    ofproto->extra_in_band_remotes = xmemdup(extras, n * sizeof *extras);

    update_in_band_remotes(ofproto);
}

void
ofproto_set_desc(struct ofproto *p,
                 const char *mfr_desc, const char *hw_desc,
                 const char *sw_desc, const char *serial_desc,
                 const char *dp_desc)
{
    struct ofp_desc_stats *ods;

    if (mfr_desc) {
        if (strlen(mfr_desc) >= sizeof ods->mfr_desc) {
            VLOG_WARN("truncating mfr_desc, must be less than %zu characters",
                      sizeof ods->mfr_desc);
        }
        free(p->mfr_desc);
        p->mfr_desc = xstrdup(mfr_desc);
    }
    if (hw_desc) {
        if (strlen(hw_desc) >= sizeof ods->hw_desc) {
            VLOG_WARN("truncating hw_desc, must be less than %zu characters",
                      sizeof ods->hw_desc);
        }
        free(p->hw_desc);
        p->hw_desc = xstrdup(hw_desc);
    }
    if (sw_desc) {
        if (strlen(sw_desc) >= sizeof ods->sw_desc) {
            VLOG_WARN("truncating sw_desc, must be less than %zu characters",
                      sizeof ods->sw_desc);
        }
        free(p->sw_desc);
        p->sw_desc = xstrdup(sw_desc);
    }
    if (serial_desc) {
        if (strlen(serial_desc) >= sizeof ods->serial_num) {
            VLOG_WARN("truncating serial_desc, must be less than %zu "
                      "characters",
                      sizeof ods->serial_num);
        }
        free(p->serial_desc);
        p->serial_desc = xstrdup(serial_desc);
    }
    if (dp_desc) {
        if (strlen(dp_desc) >= sizeof ods->dp_desc) {
            VLOG_WARN("truncating dp_desc, must be less than %zu characters",
                      sizeof ods->dp_desc);
        }
        free(p->dp_desc);
        p->dp_desc = xstrdup(dp_desc);
    }
}

static int
set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
            const struct svec *svec)
{
    struct pvconn **pvconns = *pvconnsp;
    size_t n_pvconns = *n_pvconnsp;
    int retval = 0;
    size_t i;

    for (i = 0; i < n_pvconns; i++) {
        pvconn_close(pvconns[i]);
    }
    free(pvconns);

    pvconns = xmalloc(svec->n * sizeof *pvconns);
    n_pvconns = 0;
    for (i = 0; i < svec->n; i++) {
        const char *name = svec->names[i];
        struct pvconn *pvconn;
        int error;

        error = pvconn_open(name, &pvconn);
        if (!error) {
            pvconns[n_pvconns++] = pvconn;
        } else {
            VLOG_ERR("failed to listen on %s: %s", name, strerror(error));
            if (!retval) {
                retval = error;
            }
        }
    }

    *pvconnsp = pvconns;
    *n_pvconnsp = n_pvconns;

    return retval;
}

int
ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops)
{
    return set_pvconns(&ofproto->snoops, &ofproto->n_snoops, snoops);
}

int
ofproto_set_netflow(struct ofproto *ofproto,
                    const struct netflow_options *nf_options)
{
    if (nf_options && nf_options->collectors.n) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, nf_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

void
ofproto_set_sflow(struct ofproto *ofproto,
                  const struct ofproto_sflow_options *oso)
{
    struct ofproto_sflow *os = ofproto->sflow;
    if (oso) {
        if (!os) {
            struct ofport *ofport;

            os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
            HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) {
                ofproto_sflow_add_port(os, ofport->odp_port,
                                       netdev_get_name(ofport->netdev));
            }
        }
        ofproto_sflow_set_options(os, oso);
    } else {
        ofproto_sflow_destroy(os);
        ofproto->sflow = NULL;
    }
}

uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
    return ofproto->datapath_id;
}

bool
ofproto_has_primary_controller(const struct ofproto *ofproto)
{
    return !hmap_is_empty(&ofproto->controllers);
}

enum ofproto_fail_mode
ofproto_get_fail_mode(const struct ofproto *p)
{
    return p->fail_mode;
}

void
ofproto_get_snoops(const struct ofproto *ofproto, struct svec *snoops)
{
    size_t i;

    for (i = 0; i < ofproto->n_snoops; i++) {
        svec_add(snoops, pvconn_get_name(ofproto->snoops[i]));
    }
}

void
ofproto_destroy(struct ofproto *p)
{
    struct ofservice *ofservice, *next_ofservice;
    struct ofconn *ofconn, *next_ofconn;
    struct ofport *ofport, *next_ofport;
    size_t i;

    if (!p) {
        return;
    }

    /* Destroy fail-open and in-band early, since they touch the classifier. */
    fail_open_destroy(p->fail_open);
    p->fail_open = NULL;

    in_band_destroy(p->in_band);
    p->in_band = NULL;
    free(p->extra_in_band_remotes);

    ofproto_flush_flows(p);
    classifier_destroy(&p->cls);

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &p->all_conns) {
        ofconn_destroy(ofconn);
    }
    hmap_destroy(&p->controllers);

    dpif_close(p->dpif);
    netdev_monitor_destroy(p->netdev_monitor);
    HMAP_FOR_EACH_SAFE (ofport, next_ofport, hmap_node, &p->ports) {
        hmap_remove(&p->ports, &ofport->hmap_node);
        ofport_free(ofport);
    }
    shash_destroy(&p->port_by_name);

    switch_status_destroy(p->switch_status);
    netflow_destroy(p->netflow);
    ofproto_sflow_destroy(p->sflow);

    HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &p->services) {
        ofservice_destroy(p, ofservice);
    }
    hmap_destroy(&p->services);

    for (i = 0; i < p->n_snoops; i++) {
        pvconn_close(p->snoops[i]);
    }
    free(p->snoops);

    mac_learning_destroy(p->ml);

    free(p->mfr_desc);
    free(p->hw_desc);
    free(p->sw_desc);
    free(p->serial_desc);
    free(p->dp_desc);

    hmap_destroy(&p->ports);

    free(p);
}

int
ofproto_run(struct ofproto *p)
{
    int error = ofproto_run1(p);
    if (!error) {
        error = ofproto_run2(p, false);
    }
    return error;
}

static void
process_port_change(struct ofproto *ofproto, int error, char *devname)
{
    if (error == ENOBUFS) {
        reinit_ports(ofproto);
    } else if (!error) {
        update_port(ofproto, devname);
        free(devname);
    }
}

/* Returns a "preference level" for snooping 'ofconn'.  A higher return value
 * means that 'ofconn' is more interesting for monitoring than a lower return
 * value. */
static int
snoop_preference(const struct ofconn *ofconn)
{
    switch (ofconn->role) {
    case NX_ROLE_MASTER:
        return 3;
    case NX_ROLE_OTHER:
        return 2;
    case NX_ROLE_SLAVE:
        return 1;
    default:
        /* Shouldn't happen. */
        return 0;
    }
}

/* One of ofproto's "snoop" pvconns has accepted a new connection on 'vconn'.
 * Connects this vconn to a controller. */
static void
add_snooper(struct ofproto *ofproto, struct vconn *vconn)
{
    struct ofconn *ofconn, *best;

    /* Pick a controller for monitoring. */
    best = NULL;
    LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
        if (ofconn->type == OFCONN_PRIMARY
            && (!best || snoop_preference(ofconn) > snoop_preference(best))) {
            best = ofconn;
        }
    }

    if (best) {
        rconn_add_monitor(best->rconn, vconn);
    } else {
        VLOG_INFO_RL(&rl, "no controller connection to snoop");
        vconn_close(vconn);
    }
}

static int
ofproto_run1(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    struct ofservice *ofservice;
    char *devname;
    int error;
    int i;

    if (shash_is_empty(&p->port_by_name)) {
        init_ports(p);
    }

    for (i = 0; i < 50; i++) {
        struct ofpbuf *buf;

        error = dpif_recv(p->dpif, &buf);
        if (error) {
            if (error == ENODEV) {
                /* Someone destroyed the datapath behind our back.  The caller
                 * better destroy us and give up, because we're just going to
                 * spin from here on out. */
                static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_ERR_RL(&rl2, "%s: datapath was destroyed externally",
                            dpif_name(p->dpif));
                return ENODEV;
            }
            break;
        }

        handle_odp_msg(p, buf);
    }

    while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }
    while ((error = netdev_monitor_poll(p->netdev_monitor,
                                        &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }

    if (p->in_band) {
        if (time_msec() >= p->next_in_band_update) {
            update_in_band_remotes(p);
        }
        in_band_run(p->in_band);
    }

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &p->all_conns) {
        ofconn_run(ofconn);
    }

    /* Fail-open maintenance.  Do this after processing the ofconns since
     * fail-open checks the status of the controller rconn. */
    if (p->fail_open) {
        fail_open_run(p->fail_open);
    }

    HMAP_FOR_EACH (ofservice, node, &p->services) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(ofservice->pvconn, OFP_VERSION, &vconn);
        if (!retval) {
            struct rconn *rconn;
            char *name;

            rconn = rconn_create(ofservice->probe_interval, 0);
            name = ofconn_make_name(p, vconn_get_name(vconn));
            rconn_connect_unreliably(rconn, vconn, name);
            free(name);

            ofconn = ofconn_create(p, rconn, OFCONN_SERVICE);
            ofconn_set_rate_limit(ofconn, ofservice->rate_limit,
                                  ofservice->burst_limit);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    for (i = 0; i < p->n_snoops; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn);
        if (!retval) {
            add_snooper(p, vconn);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    if (time_msec() >= p->next_expiration) {
        int delay = ofproto_expire(p);
        p->next_expiration = time_msec() + delay;
        COVERAGE_INC(ofproto_expiration);
    }

    if (p->netflow) {
        netflow_run(p->netflow);
    }
    if (p->sflow) {
        ofproto_sflow_run(p->sflow);
    }

    return 0;
}

struct revalidate_cbdata {
    struct ofproto *ofproto;
    bool revalidate_all;        /* Revalidate all exact-match rules? */
    bool revalidate_subrules;   /* Revalidate all exact-match subrules? */
    struct tag_set revalidate_set; /* Set of tags to revalidate. */
};

int
ofproto_run2(struct ofproto *p, bool revalidate_all)
{
    if (p->need_revalidate || revalidate_all
        || !tag_set_is_empty(&p->revalidate_set)) {
        struct revalidate_cbdata cbdata;
        cbdata.ofproto = p;
        cbdata.revalidate_all = revalidate_all;
        cbdata.revalidate_subrules = p->need_revalidate;
        cbdata.revalidate_set = p->revalidate_set;
        tag_set_init(&p->revalidate_set);
        COVERAGE_INC(ofproto_revalidate);
        classifier_for_each(&p->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
        p->need_revalidate = false;
    }

    return 0;
}

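/* Descriptive note: ofproto_run() drives both phases above: ofproto_run1()
 * does the I/O and housekeeping, while ofproto_run2() re-translates installed
 * exact-match flows only when 'need_revalidate' or a tag in 'revalidate_set'
 * indicates that their cached ODP actions may be stale (e.g. after a call to
 * ofproto_revalidate() from a hook). */
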
void
ofproto_wait(struct ofproto *p)
{
    struct ofservice *ofservice;
    struct ofconn *ofconn;
    size_t i;

    dpif_recv_wait(p->dpif);
    dpif_port_poll_wait(p->dpif);
    netdev_monitor_poll_wait(p->netdev_monitor);
    LIST_FOR_EACH (ofconn, node, &p->all_conns) {
        ofconn_wait(ofconn);
    }
    if (p->in_band) {
        poll_timer_wait_until(p->next_in_band_update);
        in_band_wait(p->in_band);
    }
    if (p->fail_open) {
        fail_open_wait(p->fail_open);
    }
    if (p->sflow) {
        ofproto_sflow_wait(p->sflow);
    }
    if (!tag_set_is_empty(&p->revalidate_set)) {
        poll_immediate_wake();
    }
    if (p->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else if (p->next_expiration != LLONG_MAX) {
        poll_timer_wait_until(p->next_expiration);
    }
    HMAP_FOR_EACH (ofservice, node, &p->services) {
        pvconn_wait(ofservice->pvconn);
    }
    for (i = 0; i < p->n_snoops; i++) {
        pvconn_wait(p->snoops[i]);
    }
}

void
ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
{
    tag_set_add(&ofproto->revalidate_set, tag);
}

struct tag_set *
ofproto_get_revalidate_set(struct ofproto *ofproto)
{
    return &ofproto->revalidate_set;
}

bool
ofproto_is_alive(const struct ofproto *p)
{
    return !hmap_is_empty(&p->controllers);
}

/* Deletes port number 'odp_port' from the datapath for 'ofproto'.
 *
 * This is almost the same as calling dpif_port_del() directly on the
 * datapath, but it also makes 'ofproto' close its open netdev for the port
 * (if any).  This makes it possible to create a new netdev of a different
 * type under the same name, which otherwise the netdev library would refuse
 * to do because of the conflict.  (The netdev would eventually get closed on
 * the next trip through ofproto_run(), but this interface is more direct.)
 *
 * Returns 0 if successful, otherwise a positive errno. */
int
ofproto_port_del(struct ofproto *ofproto, uint16_t odp_port)
{
    struct ofport *ofport = get_port(ofproto, odp_port);
    const char *name = ofport ? (char *) ofport->opp.name : "<unknown>";
    int error;

    error = dpif_port_del(ofproto->dpif, odp_port);
    if (error) {
        VLOG_ERR("%s: failed to remove port %"PRIu16" (%s) interface (%s)",
                 dpif_name(ofproto->dpif), odp_port, name, strerror(error));
    } else if (ofport) {
        /* 'name' is ofport->opp.name and update_port() is going to destroy
         * 'ofport'.  Just in case update_port() refers to 'name' after it
         * destroys 'ofport', make a copy of it around the update_port()
         * call. */
        char *devname = xstrdup(name);
        update_port(ofproto, devname);
        free(devname);
    }
    return error;
}

/* Checks if 'ofproto' thinks 'odp_port' should be included in floods.  Returns
 * true if 'odp_port' exists and should be included, false otherwise. */
bool
ofproto_port_is_floodable(struct ofproto *ofproto, uint16_t odp_port)
{
    struct ofport *ofport = get_port(ofproto, odp_port);
    return ofport && !(ofport->opp.config & OFPPC_NO_FLOOD);
}

int
ofproto_send_packet(struct ofproto *p, const struct flow *flow,
                    const union ofp_action *actions, size_t n_actions,
                    const struct ofpbuf *packet)
{
    struct odp_actions odp_actions;
    int error;

    error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
                          NULL, NULL, NULL);
    if (error) {
        return error;
    }

    /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
     * error code? */
    dpif_execute(p->dpif, odp_actions.actions, odp_actions.n_actions, packet);
    return 0;
}

/* Adds a flow to the OpenFlow flow table in 'p' that matches 'cls_rule' and
 * performs the 'n_actions' actions in 'actions'.  The new flow will not
 * timeout.
 *
 * If cls_rule->priority is in the range of priorities supported by OpenFlow
 * (0...65535, inclusive) then the flow will be visible to OpenFlow
 * controllers; otherwise, it will be hidden.
 *
 * The caller retains ownership of 'cls_rule' and 'actions'. */
void
ofproto_add_flow(struct ofproto *p, const struct cls_rule *cls_rule,
                 const union ofp_action *actions, size_t n_actions)
{
    struct rule *rule;
    rule = rule_create(p, NULL, actions, n_actions, 0, 0, 0, false);
    rule->cr = *cls_rule;
    rule_insert(p, rule, NULL, 0);
}

void
ofproto_delete_flow(struct ofproto *ofproto, const struct cls_rule *target)
{
    struct rule *rule;

    rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
                                                           target));
    if (rule) {
        rule_remove(ofproto, rule);
    }
}

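/* Illustrative sketch (hypothetical caller; not code from this file): a
 * permanent drop flow can be added by passing no actions, since a flow with
 * zero actions drops the packets it matches:
 *
 *     struct cls_rule cr;
 *     ...initialize cr's flow, wildcards, and priority...
 *     ofproto_add_flow(p, &cr, NULL, 0);
 */
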
static void
destroy_rule(struct cls_rule *rule_, void *ofproto_)
{
    struct rule *rule = rule_from_cls_rule(rule_);
    struct ofproto *ofproto = ofproto_;

    /* Mark the flow as not installed, even though it might really be
     * installed, so that rule_remove() doesn't bother trying to uninstall it.
     * There is no point in uninstalling it individually since we are about to
     * blow away all the flows with dpif_flow_flush(). */
    rule->installed = false;

    rule_remove(ofproto, rule);
}

void
ofproto_flush_flows(struct ofproto *ofproto)
{
    COVERAGE_INC(ofproto_flush);
    classifier_for_each(&ofproto->cls, CLS_INC_ALL, destroy_rule, ofproto);
    dpif_flow_flush(ofproto->dpif);
    if (ofproto->in_band) {
        in_band_flushed(ofproto->in_band);
    }
    if (ofproto->fail_open) {
        fail_open_flushed(ofproto->fail_open);
    }
}

static void
reinit_ports(struct ofproto *p)
{
    struct svec devnames;
    struct ofport *ofport;
    struct odp_port *odp_ports;
    size_t n_odp_ports;
    size_t i;

    COVERAGE_INC(ofproto_reinit_ports);

    svec_init(&devnames);
    HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
        svec_add (&devnames, (char *) ofport->opp.name);
    }
    dpif_port_list(p->dpif, &odp_ports, &n_odp_ports);
    for (i = 0; i < n_odp_ports; i++) {
        svec_add (&devnames, odp_ports[i].devname);
    }
    free(odp_ports);

    svec_sort_unique(&devnames);
    for (i = 0; i < devnames.n; i++) {
        update_port(p, devnames.names[i]);
    }
    svec_destroy(&devnames);
}

static struct ofport *
make_ofport(const struct odp_port *odp_port)
{
    struct netdev_options netdev_options;
    enum netdev_flags flags;
    struct ofport *ofport;
    struct netdev *netdev;
    int error;

    memset(&netdev_options, 0, sizeof netdev_options);
    netdev_options.name = odp_port->devname;
    netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;

    error = netdev_open(&netdev_options, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
                     "cannot be opened (%s)",
                     odp_port->devname, odp_port->port,
                     odp_port->devname, strerror(error));
        return NULL;
    }

    ofport = xmalloc(sizeof *ofport);
    ofport->netdev = netdev;
    ofport->odp_port = odp_port->port;
    ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port);
    netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
    memcpy(ofport->opp.name, odp_port->devname,
           MIN(sizeof ofport->opp.name, sizeof odp_port->devname));
    ofport->opp.name[sizeof ofport->opp.name - 1] = '\0';

    netdev_get_flags(netdev, &flags);
    ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;

    ofport->opp.state = netdev_get_carrier(netdev) ? 0 : OFPPS_LINK_DOWN;

    netdev_get_features(netdev,
                        &ofport->opp.curr, &ofport->opp.advertised,
                        &ofport->opp.supported, &ofport->opp.peer);
    return ofport;
}

static bool
ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port)
{
    if (get_port(p, odp_port->port)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
                     odp_port->port);
        return true;
    } else if (shash_find(&p->port_by_name, odp_port->devname)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
                     odp_port->devname);
        return true;
    } else {
        return false;
    }
}

static bool
ofport_equal(const struct ofport *a_, const struct ofport *b_)
{
    const struct ofp_phy_port *a = &a_->opp;
    const struct ofp_phy_port *b = &b_->opp;

    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
    return (a->port_no == b->port_no
            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
            && !strcmp((char *) a->name, (char *) b->name)
            && a->state == b->state
            && a->config == b->config
            && a->curr == b->curr
            && a->advertised == b->advertised
            && a->supported == b->supported
            && a->peer == b->peer);
}

static void
send_port_status(struct ofproto *p, const struct ofport *ofport,
                 uint8_t reason)
{
    /* XXX Should limit the number of queued port status change messages. */
    struct ofconn *ofconn;
    LIST_FOR_EACH (ofconn, node, &p->all_conns) {
        struct ofp_port_status *ops;
        struct ofpbuf *b;

        if (!ofconn_receives_async_msgs(ofconn)) {
            continue;
        }

        ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
        ops->reason = reason;
        ops->desc = ofport->opp;
        hton_ofp_phy_port(&ops->desc);
        queue_tx(b, ofconn, NULL);
    }
}

static void
ofport_install(struct ofproto *p, struct ofport *ofport)
{
    const char *netdev_name = (const char *) ofport->opp.name;

    netdev_monitor_add(p->netdev_monitor, ofport->netdev);
    hmap_insert(&p->ports, &ofport->hmap_node, hash_int(ofport->odp_port, 0));
    shash_add(&p->port_by_name, netdev_name, ofport);
    if (p->sflow) {
        ofproto_sflow_add_port(p->sflow, ofport->odp_port, netdev_name);
    }
}

static void
ofport_remove(struct ofproto *p, struct ofport *ofport)
{
    netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
    hmap_remove(&p->ports, &ofport->hmap_node);
    shash_delete(&p->port_by_name,
                 shash_find(&p->port_by_name, (char *) ofport->opp.name));
    if (p->sflow) {
        ofproto_sflow_del_port(p->sflow, ofport->odp_port);
    }
}

static void
ofport_free(struct ofport *ofport)
{
    if (ofport) {
        netdev_close(ofport->netdev);
        free(ofport);
    }
}

static struct ofport *
get_port(const struct ofproto *ofproto, uint16_t odp_port)
{
    struct ofport *port;

    HMAP_FOR_EACH_IN_BUCKET (port, hmap_node,
                             hash_int(odp_port, 0), &ofproto->ports) {
        if (port->odp_port == odp_port) {
            return port;
        }
    }
    return NULL;
}

static void
update_port(struct ofproto *p, const char *devname)
{
    struct odp_port odp_port;
    struct ofport *old_ofport;
    struct ofport *new_ofport;
    int error;

    COVERAGE_INC(ofproto_update_port);

    /* Query the datapath for port information. */
    error = dpif_port_query_by_name(p->dpif, devname, &odp_port);

    /* Find the old ofport. */
    old_ofport = shash_find_data(&p->port_by_name, devname);
    if (!error) {
        if (!old_ofport) {
            /* There's no port named 'devname' but there might be a port with
             * the same port number.  This could happen if a port is deleted
             * and then a new one added in its place very quickly, or if a port
             * is renamed.  In the former case we want to send an OFPPR_DELETE
             * and an OFPPR_ADD, and in the latter case we want to send a
             * single OFPPR_MODIFY.  We can distinguish the cases by comparing
             * the old port's ifindex against the new port, or perhaps less
             * reliably but more portably by comparing the old port's MAC
             * against the new port's MAC.  However, this code isn't that smart
             * and always sends an OFPPR_MODIFY (XXX). */
            old_ofport = get_port(p, odp_port.port);
        }
    } else if (error != ENOENT && error != ENODEV) {
        VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error "
                     "%s", strerror(error));
        return;
    }

    /* Create a new ofport. */
    new_ofport = !error ? make_ofport(&odp_port) : NULL;

    /* Eliminate a few pathological cases. */
    if (!old_ofport && !new_ofport) {
        return;
    } else if (old_ofport && new_ofport) {
        /* Most of the 'config' bits are OpenFlow soft state, but
         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
         * OpenFlow bits from old_ofport.  (make_ofport() only sets
         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
        new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;

        if (ofport_equal(old_ofport, new_ofport)) {
            /* False alarm--no change. */
            ofport_free(new_ofport);
            return;
        }
    }

    /* Now deal with the normal cases. */
    if (old_ofport) {
        ofport_remove(p, old_ofport);
    }
    if (new_ofport) {
        ofport_install(p, new_ofport);
    }
    send_port_status(p, new_ofport ? new_ofport : old_ofport,
                     (!old_ofport ? OFPPR_ADD
                      : !new_ofport ? OFPPR_DELETE
                      : OFPPR_MODIFY));
    ofport_free(old_ofport);
}

static int
init_ports(struct ofproto *p)
{
    struct odp_port *ports;
    size_t n_ports;
    size_t i;
    int error;

    error = dpif_port_list(p->dpif, &ports, &n_ports);
    if (error) {
        return error;
    }

    for (i = 0; i < n_ports; i++) {
        const struct odp_port *odp_port = &ports[i];
        if (!ofport_conflicts(p, odp_port)) {
            struct ofport *ofport = make_ofport(odp_port);
            if (ofport) {
                ofport_install(p, ofport);
            }
        }
    }
    free(ports);
    return 0;
}

static struct ofconn *
ofconn_create(struct ofproto *p, struct rconn *rconn, enum ofconn_type type)
{
    struct ofconn *ofconn = xzalloc(sizeof *ofconn);
    ofconn->ofproto = p;
    list_push_back(&p->all_conns, &ofconn->node);
    ofconn->rconn = rconn;
    ofconn->type = type;
    ofconn->flow_format = NXFF_OPENFLOW10;
    ofconn->role = NX_ROLE_OTHER;
    ofconn->packet_in_counter = rconn_packet_counter_create ();
    ofconn->pktbuf = NULL;
    ofconn->miss_send_len = 0;
    ofconn->reply_counter = rconn_packet_counter_create ();
    return ofconn;
}

static void
ofconn_destroy(struct ofconn *ofconn)
{
    if (ofconn->type == OFCONN_PRIMARY) {
        hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node);
    }
    discovery_destroy(ofconn->discovery);

    list_remove(&ofconn->node);
    switch_status_unregister(ofconn->ss);
    rconn_destroy(ofconn->rconn);
    rconn_packet_counter_destroy(ofconn->packet_in_counter);
    rconn_packet_counter_destroy(ofconn->reply_counter);
    pktbuf_destroy(ofconn->pktbuf);
    free(ofconn);
}

static void
ofconn_run(struct ofconn *ofconn)
{
    struct ofproto *p = ofconn->ofproto;
    int iteration;
    size_t i;

    if (ofconn->discovery) {
        char *controller_name;
        if (rconn_is_connectivity_questionable(ofconn->rconn)) {
            discovery_question_connectivity(ofconn->discovery);
        }
        if (discovery_run(ofconn->discovery, &controller_name)) {
            if (controller_name) {
                char *ofconn_name = ofconn_make_name(p, controller_name);
                rconn_connect(ofconn->rconn, controller_name, ofconn_name);
                free(ofconn_name);
            } else {
                rconn_disconnect(ofconn->rconn);
            }
        }
    }

    for (i = 0; i < N_SCHEDULERS; i++) {
        pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn);
    }

    rconn_run(ofconn->rconn);

    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        /* Limit the number of iterations to prevent other tasks from
         * starving. */
        for (iteration = 0; iteration < 50; iteration++) {
            struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
            if (!of_msg) {
                break;
            }
            if (p->fail_open) {
                fail_open_maybe_recover(p->fail_open);
            }
            handle_openflow(ofconn, of_msg);
            ofpbuf_delete(of_msg);
        }
    }

    if (!ofconn->discovery && !rconn_is_alive(ofconn->rconn)) {
        ofconn_destroy(ofconn);
    }
}

static void
ofconn_wait(struct ofconn *ofconn)
{
    int i;

    if (ofconn->discovery) {
        discovery_wait(ofconn->discovery);
    }
    for (i = 0; i < N_SCHEDULERS; i++) {
        pinsched_wait(ofconn->schedulers[i]);
    }
    rconn_run_wait(ofconn->rconn);
    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        rconn_recv_wait(ofconn->rconn);
    } else {
        COVERAGE_INC(ofproto_ofconn_stuck);
    }
}

/* Returns true if 'ofconn' should receive asynchronous messages. */
static bool
ofconn_receives_async_msgs(const struct ofconn *ofconn)
{
    if (ofconn->type == OFCONN_PRIMARY) {
        /* Primary controllers always get asynchronous messages unless they
         * have configured themselves as "slaves". */
        return ofconn->role != NX_ROLE_SLAVE;
    } else {
        /* Service connections don't get asynchronous messages unless they have
         * explicitly asked for them by setting a nonzero miss send length. */
        return ofconn->miss_send_len > 0;
    }
}

/* Returns a human-readable name for an OpenFlow connection between 'ofproto'
 * and 'target', suitable for use in log messages for identifying the
 * connection.
 *
 * The name is dynamically allocated.  The caller should free it (with free())
 * when it is no longer needed. */
static char *
ofconn_make_name(const struct ofproto *ofproto, const char *target)
{
    return xasprintf("%s<->%s", dpif_base_name(ofproto->dpif), target);
}

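/* For example, with a datapath whose base name is "br0" and target
 * "tcp:10.0.0.1:6633", this returns "br0<->tcp:10.0.0.1:6633". */
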
static void
ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst)
{
    int i;

    for (i = 0; i < N_SCHEDULERS; i++) {
        struct pinsched **s = &ofconn->schedulers[i];

        if (rate > 0) {
            if (!*s) {
                *s = pinsched_create(rate, burst,
                                     ofconn->ofproto->switch_status);
            } else {
                pinsched_set_limits(*s, rate, burst);
            }
        } else {
            pinsched_destroy(*s);
            *s = NULL;
        }
    }
}

static void
ofservice_reconfigure(struct ofservice *ofservice,
                      const struct ofproto_controller *c)
{
    ofservice->probe_interval = c->probe_interval;
    ofservice->rate_limit = c->rate_limit;
    ofservice->burst_limit = c->burst_limit;
}

/* Creates a new ofservice in 'ofproto'.  Returns 0 if successful, otherwise a
 * positive errno value. */
static int
ofservice_create(struct ofproto *ofproto, const struct ofproto_controller *c)
{
    struct ofservice *ofservice;
    struct pvconn *pvconn;
    int error;

    error = pvconn_open(c->target, &pvconn);
    if (error) {
        return error;
    }

    ofservice = xzalloc(sizeof *ofservice);
    hmap_insert(&ofproto->services, &ofservice->node,
                hash_string(c->target, 0));
    ofservice->pvconn = pvconn;

    ofservice_reconfigure(ofservice, c);

    return 0;
}

static void
ofservice_destroy(struct ofproto *ofproto, struct ofservice *ofservice)
{
    hmap_remove(&ofproto->services, &ofservice->node);
    pvconn_close(ofservice->pvconn);
    free(ofservice);
}

/* Finds and returns the ofservice within 'ofproto' that has the given
 * 'target', or a null pointer if none exists. */
static struct ofservice *
ofservice_lookup(struct ofproto *ofproto, const char *target)
{
    struct ofservice *ofservice;

    HMAP_FOR_EACH_WITH_HASH (ofservice, node, hash_string(target, 0),
                             &ofproto->services) {
        if (!strcmp(pvconn_get_name(ofservice->pvconn), target)) {
            return ofservice;
        }
    }
    return NULL;
}

/* Caller is responsible for initializing the 'cr' member of the returned
 * rule. */
static struct rule *
rule_create(struct ofproto *ofproto, struct rule *super,
            const union ofp_action *actions, size_t n_actions,
            uint16_t idle_timeout, uint16_t hard_timeout,
            ovs_be64 flow_cookie, bool send_flow_removed)
{
    struct rule *rule = xzalloc(sizeof *rule);
    rule->idle_timeout = idle_timeout;
    rule->hard_timeout = hard_timeout;
    rule->flow_cookie = flow_cookie;
    rule->used = rule->created = time_msec();
    rule->send_flow_removed = send_flow_removed;
    rule->super = super;
    if (super) {
        list_push_back(&super->list, &rule->list);
    } else {
        list_init(&rule->list);
    }
    if (n_actions > 0) {
        rule->n_actions = n_actions;
        rule->actions = xmemdup(actions, n_actions * sizeof *actions);
    }
    netflow_flow_clear(&rule->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);

    return rule;
}

static struct rule *
rule_from_cls_rule(const struct cls_rule *cls_rule)
{
    return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
}

static void
rule_free(struct rule *rule)
{
    free(rule->actions);
    free(rule->odp_actions);
    free(rule);
}

/* Destroys 'rule'.  If 'rule' is a subrule, also removes it from its
 * super-rule's list of subrules.  If 'rule' is a super-rule, also iterates
 * through all of its subrules and revalidates them, destroying any that no
 * longer has a super-rule (which is probably all of them).
 *
 * Before calling this function, the caller must have removed 'rule' from the
 * classifier.  If 'rule' is an exact-match rule, the caller is also
 * responsible for ensuring that it has been uninstalled from the datapath. */
static void
rule_destroy(struct ofproto *ofproto, struct rule *rule)
{
    if (!rule->super) {
        struct rule *subrule, *next;
        LIST_FOR_EACH_SAFE (subrule, next, list, &rule->list) {
            revalidate_rule(ofproto, subrule);
        }
    } else {
        list_remove(&rule->list);
    }
    rule_free(rule);
}

static bool
rule_has_out_port(const struct rule *rule, ovs_be16 out_port)
{
    const union ofp_action *oa;
    struct actions_iterator i;

    if (out_port == htons(OFPP_NONE)) {
        return true;
    }
    for (oa = actions_first(&i, rule->actions, rule->n_actions); oa;
         oa = actions_next(&i)) {
        if (action_outputs_to_port(oa, out_port)) {
            return true;
        }
    }
    return false;
}

/* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
 * 'packet', which arrived on 'in_port'.
 *
 * Takes ownership of 'packet'. */
static bool
execute_odp_actions(struct ofproto *ofproto, uint16_t in_port,
                    const union odp_action *actions, size_t n_actions,
                    struct ofpbuf *packet)
{
    if (n_actions == 1 && actions[0].type == ODPAT_CONTROLLER) {
        /* As an optimization, avoid a round-trip from userspace to kernel to
         * userspace.  This also avoids possibly filling up kernel packet
         * buffers along the way. */
        struct odp_msg *msg;

        msg = ofpbuf_push_uninit(packet, sizeof *msg);
        msg->type = _ODPL_ACTION_NR;
        msg->length = sizeof(struct odp_msg) + packet->size;
        msg->port = in_port;
        msg->reserved = 0;
        msg->arg = actions[0].controller.arg;

        send_packet_in(ofproto, packet);

        return true;
    } else {
        int error;

        error = dpif_execute(ofproto->dpif, actions, n_actions, packet);
        ofpbuf_delete(packet);
        return !error;
    }
}

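/* Descriptive note on the fast path above: the synthesized odp_msg header
 * makes 'packet' look exactly as if the datapath had queued it to userspace
 * for an ODPAT_CONTROLLER action, so send_packet_in() can process locally
 * generated and kernel-generated packets identically. */
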
/* Executes the actions indicated by 'rule' on 'packet', which is in flow
 * 'flow' and is considered to have arrived on ODP port 'in_port'.  'packet'
 * must have at least sizeof(struct ofp_packet_in) bytes of headroom.
 *
 * The flow that 'packet' actually contains does not need to actually match
 * 'rule'; the actions in 'rule' will be applied to it either way.  Likewise,
 * the packet and byte counters for 'rule' will be credited for the packet sent
 * out whether or not the packet actually matches 'rule'.
 *
 * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
 * the caller must already have accurately composed ODP actions for it given
 * 'packet' using rule_make_actions().  If 'rule' is a wildcard rule, or if
 * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
 * function will compose a set of ODP actions based on 'rule''s OpenFlow
 * actions and apply them to 'packet'.
 *
 * Takes ownership of 'packet'. */
static void
rule_execute(struct ofproto *ofproto, struct rule *rule,
             struct ofpbuf *packet, const struct flow *flow)
{
    const union odp_action *actions;
    struct odp_flow_stats stats;
    size_t n_actions;
    struct odp_actions a;

    assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));

    /* Grab or compose the ODP actions.
     *
     * The special case for an exact-match 'rule' where 'flow' is not the
     * rule's flow is important to avoid, e.g., sending a packet out its input
     * port simply because the ODP actions were composed for the wrong
     * situation. */
    if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
        struct rule *super = rule->super ? rule->super : rule;
        if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
                          packet, &a, NULL, 0, NULL)) {
            ofpbuf_delete(packet);
            return;
        }
        actions = a.actions;
        n_actions = a.n_actions;
    } else {
        actions = rule->odp_actions;
        n_actions = rule->n_odp_actions;
    }

    /* Execute the ODP actions. */
    flow_extract_stats(flow, packet, &stats);
    if (execute_odp_actions(ofproto, flow->in_port,
                            actions, n_actions, packet)) {
        update_stats(ofproto, rule, &stats);
        rule->used = time_msec();
        netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
    }
}

/* Inserts 'rule' into 'p''s flow table.
 *
 * If 'packet' is nonnull, takes ownership of 'packet', executes 'rule''s
 * actions on it and credits the statistics for sending the packet to 'rule'.
 * 'packet' must have at least sizeof(struct ofp_packet_in) bytes of
 * headroom. */
static void
rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet,
            uint16_t in_port)
{
    struct rule *displaced_rule;

    /* Insert the rule in the classifier. */
    displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
    if (!rule->cr.wc.wildcards) {
        rule_make_actions(p, rule, packet);
    }

    /* Send the packet and credit it to the rule. */
    if (packet) {
        struct flow flow;
        flow_extract(packet, 0, in_port, &flow);
        rule_execute(p, rule, packet, &flow);
    }

    /* Install the rule in the datapath only after sending the packet, to
     * avoid packet reordering. */
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_add_wc_flow);
        p->need_revalidate = true;
    } else {
        rule_install(p, rule, displaced_rule);
    }

    /* Free the rule that was displaced, if any. */
    if (displaced_rule) {
        rule_destroy(p, displaced_rule);
    }
}

static struct rule *
rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
                    const struct flow *flow)
{
    struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
                                       rule->idle_timeout, rule->hard_timeout,
                                       0, false);
    COVERAGE_INC(ofproto_subrule_create);
    cls_rule_init_exact(flow, (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
                               : rule->cr.priority),
                        &subrule->cr);

    if (classifier_insert(&ofproto->cls, &subrule->cr)) {
        /* Can't happen. */
        NOT_REACHED();
    }

    return subrule;
}

/* Remove 'rule' from 'ofproto' and free up the associated memory:
 *
 *   - If 'rule' was installed in the datapath, uninstalls it and updates
 *     'rule''s statistics (or its super-rule's statistics, if it is a
 *     subrule), via rule_uninstall().
 *
 *   - Removes 'rule' from the classifier.
 *
 *   - If 'rule' is a super-rule that has subrules, revalidates (and possibly
 *     uninstalls and destroys) its subrules, via rule_destroy().
 */
static void
rule_remove(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_del_wc_flow);
        ofproto->need_revalidate = true;
    } else {
        rule_uninstall(ofproto, rule);
    }
    classifier_remove(&ofproto->cls, &rule->cr);
    rule_destroy(ofproto, rule);
}

/* Returns true if the actions changed, false otherwise. */
static bool
rule_make_actions(struct ofproto *p, struct rule *rule,
                  const struct ofpbuf *packet)
{
    const struct rule *super;
    struct odp_actions a;
    size_t actions_len;

    assert(!rule->cr.wc.wildcards);

    super = rule->super ? rule->super : rule;
    rule->tags = 0;
    xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
                  packet, &a, &rule->tags, &rule->may_install,
                  &rule->nf_flow.output_iface);

    actions_len = a.n_actions * sizeof *a.actions;
    if (rule->n_odp_actions != a.n_actions
        || memcmp(rule->odp_actions, a.actions, actions_len)) {
        COVERAGE_INC(ofproto_odp_unchanged);
        free(rule->odp_actions);
        rule->n_odp_actions = a.n_actions;
        rule->odp_actions = xmemdup(a.actions, actions_len);
        return true;
    } else {
        return false;
    }
}

static int
do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags,
            struct odp_flow_put *put)
{
    memset(&put->flow.stats, 0, sizeof put->flow.stats);
    odp_flow_key_from_flow(&put->flow.key, &rule->cr.flow);
    put->flow.actions = rule->odp_actions;
    put->flow.n_actions = rule->n_odp_actions;
    put->flow.flags = 0;
    put->flags = flags;
    return dpif_flow_put(ofproto->dpif, put);
}

static void
rule_install(struct ofproto *p, struct rule *rule, struct rule *displaced_rule)
{
    assert(!rule->cr.wc.wildcards);

    if (rule->may_install) {
        struct odp_flow_put put;
        if (!do_put_flow(p, rule,
                         ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS,
                         &put)) {
            rule->installed = true;
            if (displaced_rule) {
                update_stats(p, displaced_rule, &put.flow.stats);
                rule_post_uninstall(p, displaced_rule);
            }
        }
    } else if (displaced_rule) {
        rule_uninstall(p, displaced_rule);
    }
}

static void
rule_reinstall(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->installed) {
        struct odp_flow_put put;
        COVERAGE_INC(ofproto_dp_missed);
        do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY, &put);
    } else {
        rule_install(ofproto, rule, NULL);
    }
}

2176 rule_update_actions(struct ofproto *ofproto, struct rule *rule)
2178 bool actions_changed;
2179 uint16_t new_out_iface, old_out_iface;
2181 old_out_iface = rule->nf_flow.output_iface;
2182 actions_changed = rule_make_actions(ofproto, rule, NULL);
2184 if (rule->may_install) {
2185 if (rule->installed) {
2186 if (actions_changed) {
2187 struct odp_flow_put put;
2188 do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY
2189 | ODPPF_ZERO_STATS, &put);
2190 update_stats(ofproto, rule, &put.flow.stats);
2192 /* Temporarily set the old output iface so that NetFlow
2193              * messages have the correct output interface for the old
              * stats. */
2195 new_out_iface = rule->nf_flow.output_iface;
2196 rule->nf_flow.output_iface = old_out_iface;
2197 rule_post_uninstall(ofproto, rule);
2198 rule->nf_flow.output_iface = new_out_iface;
2201 rule_install(ofproto, rule, NULL);
2204 rule_uninstall(ofproto, rule);
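/* Credits 'rule''s byte count, plus 'extra_bytes' not yet reflected there, to
 * the client's account_flow_cb hook, if any.  'rule->accounted_bytes' records
 * what has already been reported, so each byte is accounted exactly once. */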
2209 rule_account(struct ofproto *ofproto, struct rule *rule, uint64_t extra_bytes)
2211 uint64_t total_bytes = rule->byte_count + extra_bytes;
2213 if (ofproto->ofhooks->account_flow_cb
2214 && total_bytes > rule->accounted_bytes)
2216 ofproto->ofhooks->account_flow_cb(
2217 &rule->cr.flow, rule->tags, rule->odp_actions, rule->n_odp_actions,
2218 total_bytes - rule->accounted_bytes, ofproto->aux);
2219 rule->accounted_bytes = total_bytes;
2223 /* 'rule' must be an exact-match rule in 'p'.
2225  * If 'rule' is installed in the datapath, uninstalls it and updates 'rule''s
2226  * statistics.  If 'rule' is a subrule, the statistics that are updated are
2227  * actually its super-rule's statistics; otherwise 'rule''s own statistics are
 * updated.
2230 * If 'rule' is not installed, this function has no effect. */
2232 rule_uninstall(struct ofproto *p, struct rule *rule)
2234 assert(!rule->cr.wc.wildcards);
2235 if (rule->installed) {
2236 struct odp_flow odp_flow;
2238 odp_flow_key_from_flow(&odp_flow.key, &rule->cr.flow);
2239 odp_flow.actions = NULL;
2240 odp_flow.n_actions = 0;
2242 if (!dpif_flow_del(p->dpif, &odp_flow)) {
2243 update_stats(p, rule, &odp_flow.stats);
2245 rule->installed = false;
2247 rule_post_uninstall(p, rule);
2252 is_controller_rule(struct rule *rule)
2254 /* If the only action is send to the controller then don't report
2255 * NetFlow expiration messages since it is just part of the control
2256 * logic for the network and not real traffic. */
2260 && rule->super->n_actions == 1
2261 && action_outputs_to_port(&rule->super->actions[0],
2262 htons(OFPP_CONTROLLER)));
2266 rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
2268 struct rule *super = rule->super;
2270 rule_account(ofproto, rule, 0);
2272 if (ofproto->netflow && !is_controller_rule(rule)) {
2273 struct ofexpired expired;
2274 expired.flow = rule->cr.flow;
2275 expired.packet_count = rule->packet_count;
2276 expired.byte_count = rule->byte_count;
2277 expired.used = rule->used;
2278 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
2281 super->packet_count += rule->packet_count;
2282 super->byte_count += rule->byte_count;
2284      /* Reset counters to prevent double counting if the rule ever gets
      * reinstalled. */
2286 rule->packet_count = 0;
2287 rule->byte_count = 0;
2288 rule->accounted_bytes = 0;
2290 netflow_flow_clear(&rule->nf_flow);
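/* Queues 'msg' for transmission to 'ofconn', charging it against 'counter',
 * after fixing up the OpenFlow length field in its header. */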
2295 queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
2296 struct rconn_packet_counter *counter)
2298 update_openflow_length(msg);
2299 if (rconn_send(ofconn->rconn, msg, counter)) {
2305 send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
2308 struct ofpbuf *buf = make_ofp_error_msg(error, oh);
2310 COVERAGE_INC(ofproto_error);
2311 queue_tx(buf, ofconn, ofconn->reply_counter);
2316 hton_ofp_phy_port(struct ofp_phy_port *opp)
2318 opp->port_no = htons(opp->port_no);
2319 opp->config = htonl(opp->config);
2320 opp->state = htonl(opp->state);
2321 opp->curr = htonl(opp->curr);
2322 opp->advertised = htonl(opp->advertised);
2323 opp->supported = htonl(opp->supported);
2324 opp->peer = htonl(opp->peer);
2328 handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
2330 struct ofp_header *rq = oh;
2331 queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
2336 handle_features_request(struct ofconn *ofconn, struct ofp_header *oh)
2338 struct ofp_switch_features *osf;
2340 struct ofport *port;
2342 osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
2343 osf->datapath_id = htonll(ofconn->ofproto->datapath_id);
2344 osf->n_buffers = htonl(pktbuf_capacity());
2346 osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
2347 OFPC_PORT_STATS | OFPC_ARP_MATCH_IP);
2348 osf->actions = htonl((1u << OFPAT_OUTPUT) |
2349 (1u << OFPAT_SET_VLAN_VID) |
2350 (1u << OFPAT_SET_VLAN_PCP) |
2351 (1u << OFPAT_STRIP_VLAN) |
2352 (1u << OFPAT_SET_DL_SRC) |
2353 (1u << OFPAT_SET_DL_DST) |
2354 (1u << OFPAT_SET_NW_SRC) |
2355 (1u << OFPAT_SET_NW_DST) |
2356 (1u << OFPAT_SET_NW_TOS) |
2357 (1u << OFPAT_SET_TP_SRC) |
2358 (1u << OFPAT_SET_TP_DST) |
2359 (1u << OFPAT_ENQUEUE));
2361 HMAP_FOR_EACH (port, hmap_node, &ofconn->ofproto->ports) {
2362 hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
2365 queue_tx(buf, ofconn, ofconn->reply_counter);
2370 handle_get_config_request(struct ofconn *ofconn, struct ofp_header *oh)
2373 struct ofp_switch_config *osc;
2377 /* Figure out flags. */
2378 dpif_get_drop_frags(ofconn->ofproto->dpif, &drop_frags);
2379 flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
2382 osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
2383 osc->flags = htons(flags);
2384 osc->miss_send_len = htons(ofconn->miss_send_len);
2385 queue_tx(buf, ofconn, ofconn->reply_counter);
2391 handle_set_config(struct ofconn *ofconn, struct ofp_switch_config *osc)
2396 error = check_ofp_message(&osc->header, OFPT_SET_CONFIG, sizeof *osc);
2400 flags = ntohs(osc->flags);
2402 if (ofconn->type == OFCONN_PRIMARY && ofconn->role != NX_ROLE_SLAVE) {
2403 switch (flags & OFPC_FRAG_MASK) {
2404 case OFPC_FRAG_NORMAL:
2405 dpif_set_drop_frags(ofconn->ofproto->dpif, false);
2407 case OFPC_FRAG_DROP:
2408 dpif_set_drop_frags(ofconn->ofproto->dpif, true);
2411 VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
2417 ofconn->miss_send_len = ntohs(osc->miss_send_len);
2423 add_controller_action(struct odp_actions *actions, uint16_t max_len)
2425 union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER);
2426 a->controller.arg = max_len;
2429 struct action_xlate_ctx {
2431 struct flow flow; /* Flow to which these actions correspond. */
2432 int recurse; /* Recursion level, via xlate_table_action. */
2433 struct ofproto *ofproto;
2434 const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
2435 * null pointer if we are revalidating
2436 * without a packet to refer to. */
2439 struct odp_actions *out; /* Datapath actions. */
2440 tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
2441 bool may_set_up_flow; /* True ordinarily; false if the actions must
2442 * be reassessed for every packet. */
2443 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
2446 /* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a
2447 * flow translation. */
2448 #define MAX_RESUBMIT_RECURSION 8
2450 static void do_xlate_actions(const union ofp_action *in, size_t n_in,
2451 struct action_xlate_ctx *ctx);
2454 add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
2456 const struct ofport *ofport = get_port(ctx->ofproto, port);
2459 if (ofport->opp.config & OFPPC_NO_FWD) {
2460 /* Forwarding disabled on port. */
2465 * We don't have an ofport record for this port, but it doesn't hurt to
2466 * allow forwarding to it anyhow. Maybe such a port will appear later
2467      * and we're pre-populating the flow table.
      */
2471 odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
2472 ctx->nf_output_iface = port;
2475 static struct rule *
2476 lookup_valid_rule(struct ofproto *ofproto, const struct flow *flow)
2479 rule = rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow,
2482 /* The rule we found might not be valid, since we could be in need of
2483 * revalidation. If it is not valid, don't return it. */
2486 && ofproto->need_revalidate
2487 && !revalidate_rule(ofproto, rule)) {
2488 COVERAGE_INC(ofproto_invalidated);
2496 xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2498 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
2499 uint16_t old_in_port;
2502 /* Look up a flow with 'in_port' as the input port. Then restore the
2503 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
2504 * have surprising behavior). */
2505 old_in_port = ctx->flow.in_port;
2506 ctx->flow.in_port = in_port;
2507 rule = lookup_valid_rule(ctx->ofproto, &ctx->flow);
2508 ctx->flow.in_port = old_in_port;
2516 do_xlate_actions(rule->actions, rule->n_actions, ctx);
2520          static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
2522 VLOG_ERR_RL(&recurse_rl, "NXAST_RESUBMIT recursed over %d times",
2523 MAX_RESUBMIT_RECURSION);
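/* Adds an ODP output action for every port in 'ofproto' other than
 * 'odp_in_port' and other than ports whose configuration includes 'mask'
 * (e.g. OFPPC_NO_FLOOD), and sets '*nf_output_iface' to NF_OUT_FLOOD. */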
2528 flood_packets(struct ofproto *ofproto, uint16_t odp_in_port, uint32_t mask,
2529 uint16_t *nf_output_iface, struct odp_actions *actions)
2531 struct ofport *ofport;
2533 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) {
2534 uint16_t odp_port = ofport->odp_port;
2535 if (odp_port != odp_in_port && !(ofport->opp.config & mask)) {
2536 odp_actions_add(actions, ODPAT_OUTPUT)->output.port = odp_port;
2539 *nf_output_iface = NF_OUT_FLOOD;
2543 xlate_output_action__(struct action_xlate_ctx *ctx,
2544 uint16_t port, uint16_t max_len)
2547 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2549 ctx->nf_output_iface = NF_OUT_DROP;
2553 add_output_action(ctx, ctx->flow.in_port);
2556 xlate_table_action(ctx, ctx->flow.in_port);
2559 if (!ctx->ofproto->ofhooks->normal_cb(&ctx->flow, ctx->packet,
2560 ctx->out, ctx->tags,
2561 &ctx->nf_output_iface,
2562 ctx->ofproto->aux)) {
2563 COVERAGE_INC(ofproto_uninstallable);
2564 ctx->may_set_up_flow = false;
2568 flood_packets(ctx->ofproto, ctx->flow.in_port, OFPPC_NO_FLOOD,
2569 &ctx->nf_output_iface, ctx->out);
2572 flood_packets(ctx->ofproto, ctx->flow.in_port, 0,
2573 &ctx->nf_output_iface, ctx->out);
2575 case OFPP_CONTROLLER:
2576 add_controller_action(ctx->out, max_len);
2579 add_output_action(ctx, ODPP_LOCAL);
2582 odp_port = ofp_port_to_odp_port(port);
2583 if (odp_port != ctx->flow.in_port) {
2584 add_output_action(ctx, odp_port);
2589 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2590 ctx->nf_output_iface = NF_OUT_FLOOD;
2591 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2592 ctx->nf_output_iface = prev_nf_output_iface;
2593 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2594 ctx->nf_output_iface != NF_OUT_FLOOD) {
2595 ctx->nf_output_iface = NF_OUT_MULTI;
2600 xlate_output_action(struct action_xlate_ctx *ctx,
2601 const struct ofp_action_output *oao)
2603 xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
2606 /* If the final ODP action in 'ctx' is "pop priority", drop it, as an
2607 * optimization, because we're going to add another action that sets the
2608  * priority immediately after, or because there are no actions following the
 * pop priority action. */
2611 remove_pop_action(struct action_xlate_ctx *ctx)
2613 size_t n = ctx->out->n_actions;
2614 if (n > 0 && ctx->out->actions[n - 1].type == ODPAT_POP_PRIORITY) {
2615 ctx->out->n_actions--;
2620 xlate_enqueue_action(struct action_xlate_ctx *ctx,
2621 const struct ofp_action_enqueue *oae)
2623 uint16_t ofp_port, odp_port;
2627 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id),
2630 /* Fall back to ordinary output action. */
2631 xlate_output_action__(ctx, ntohs(oae->port), 0);
2635 /* Figure out ODP output port. */
2636 ofp_port = ntohs(oae->port);
2637 if (ofp_port != OFPP_IN_PORT) {
2638 odp_port = ofp_port_to_odp_port(ofp_port);
2640 odp_port = ctx->flow.in_port;
2643 /* Add ODP actions. */
2644 remove_pop_action(ctx);
2645 odp_actions_add(ctx->out, ODPAT_SET_PRIORITY)->priority.priority
2647 add_output_action(ctx, odp_port);
2648 odp_actions_add(ctx->out, ODPAT_POP_PRIORITY);
2650 /* Update NetFlow output port. */
2651 if (ctx->nf_output_iface == NF_OUT_DROP) {
2652 ctx->nf_output_iface = odp_port;
2653 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
2654 ctx->nf_output_iface = NF_OUT_MULTI;
2659 xlate_set_queue_action(struct action_xlate_ctx *ctx,
2660 const struct nx_action_set_queue *nasq)
2665 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id),
2668 /* Couldn't translate queue to a priority, so ignore. A warning
2669 * has already been logged. */
2673 remove_pop_action(ctx);
2674 odp_actions_add(ctx->out, ODPAT_SET_PRIORITY)->priority.priority
2679 xlate_set_dl_tci(struct action_xlate_ctx *ctx)
2681 ovs_be16 dl_vlan = ctx->flow.dl_vlan;
2682 uint8_t dl_vlan_pcp = ctx->flow.dl_vlan_pcp;
2684 if (dl_vlan == htons(OFP_VLAN_NONE)) {
2685 odp_actions_add(ctx->out, ODPAT_STRIP_VLAN);
2687 union odp_action *oa = odp_actions_add(ctx->out, ODPAT_SET_DL_TCI);
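        /* Pack the 12-bit VLAN ID and 3-bit PCP into an 802.1Q TCI. */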
2688 oa->dl_tci.tci = htons(ntohs(dl_vlan & htons(VLAN_VID_MASK))
2689 | (dl_vlan_pcp << VLAN_PCP_SHIFT)
2695 xlate_reg_move_action(struct action_xlate_ctx *ctx,
2696 const struct nx_action_reg_move *narm)
2698 ovs_be16 old_vlan = ctx->flow.dl_vlan;
2699 uint8_t old_pcp = ctx->flow.dl_vlan_pcp;
2701 nxm_execute_reg_move(narm, &ctx->flow);
2703 if (ctx->flow.dl_vlan != old_vlan || ctx->flow.dl_vlan_pcp != old_pcp) {
2704 xlate_set_dl_tci(ctx);
2709 xlate_nicira_action(struct action_xlate_ctx *ctx,
2710 const struct nx_action_header *nah)
2712 const struct nx_action_resubmit *nar;
2713 const struct nx_action_set_tunnel *nast;
2714 const struct nx_action_set_queue *nasq;
2715 union odp_action *oa;
2716 int subtype = ntohs(nah->subtype);
2718 assert(nah->vendor == htonl(NX_VENDOR_ID));
2720 case NXAST_RESUBMIT:
2721 nar = (const struct nx_action_resubmit *) nah;
2722 xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
2725 case NXAST_SET_TUNNEL:
2726 nast = (const struct nx_action_set_tunnel *) nah;
2727 oa = odp_actions_add(ctx->out, ODPAT_SET_TUNNEL);
2728 ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;
2731 case NXAST_DROP_SPOOFED_ARP:
2732 if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) {
2733 odp_actions_add(ctx->out, ODPAT_DROP_SPOOFED_ARP);
2737 case NXAST_SET_QUEUE:
2738 nasq = (const struct nx_action_set_queue *) nah;
2739 xlate_set_queue_action(ctx, nasq);
2742 case NXAST_POP_QUEUE:
2743 odp_actions_add(ctx->out, ODPAT_POP_PRIORITY);
2746 case NXAST_REG_MOVE:
2747 xlate_reg_move_action(ctx, (const struct nx_action_reg_move *) nah);
2750 case NXAST_REG_LOAD:
2751 nxm_execute_reg_load((const struct nx_action_reg_load *) nah,
2755 /* If you add a new action here that modifies flow data, don't forget to
2756 * update the flow key in ctx->flow at the same time. */
2759 VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
2765 do_xlate_actions(const union ofp_action *in, size_t n_in,
2766 struct action_xlate_ctx *ctx)
2768 struct actions_iterator iter;
2769 const union ofp_action *ia;
2770 const struct ofport *port;
2772 port = get_port(ctx->ofproto, ctx->flow.in_port);
2773 if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
2774 port->opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
2775 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
2776 /* Drop this flow. */
2780 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
2781 uint16_t type = ntohs(ia->type);
2782 union odp_action *oa;
2786 xlate_output_action(ctx, &ia->output);
2789 case OFPAT_SET_VLAN_VID:
2790 ctx->flow.dl_vlan = ia->vlan_vid.vlan_vid;
2791 xlate_set_dl_tci(ctx);
2794 case OFPAT_SET_VLAN_PCP:
2795 ctx->flow.dl_vlan_pcp = ia->vlan_pcp.vlan_pcp;
2796 xlate_set_dl_tci(ctx);
2799 case OFPAT_STRIP_VLAN:
2800 ctx->flow.dl_vlan = htons(OFP_VLAN_NONE);
2801 ctx->flow.dl_vlan_pcp = 0;
2802 xlate_set_dl_tci(ctx);
2805 case OFPAT_SET_DL_SRC:
2806 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_SRC);
2807 memcpy(oa->dl_addr.dl_addr,
2808 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2809 memcpy(ctx->flow.dl_src,
2810 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2813 case OFPAT_SET_DL_DST:
2814 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_DST);
2815 memcpy(oa->dl_addr.dl_addr,
2816 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2817 memcpy(ctx->flow.dl_dst,
2818 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2821 case OFPAT_SET_NW_SRC:
2822 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_SRC);
2823 ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2826 case OFPAT_SET_NW_DST:
2827 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
2828 ctx->flow.nw_dst = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2831 case OFPAT_SET_NW_TOS:
2832 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_TOS);
2833 ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
2836 case OFPAT_SET_TP_SRC:
2837 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC);
2838 ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port;
2841 case OFPAT_SET_TP_DST:
2842 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST);
2843 ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port;
2847 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
2851 xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
2855 VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
2862 xlate_actions(const union ofp_action *in, size_t n_in,
2863 const struct flow *flow, struct ofproto *ofproto,
2864 const struct ofpbuf *packet,
2865 struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
2866 uint16_t *nf_output_iface)
2868 tag_type no_tags = 0;
2869 struct action_xlate_ctx ctx;
2870 COVERAGE_INC(ofproto_ofp2odp);
2871 odp_actions_init(out);
2874 ctx.ofproto = ofproto;
2875 ctx.packet = packet;
2877 ctx.tags = tags ? tags : &no_tags;
2878 ctx.may_set_up_flow = true;
2879 ctx.nf_output_iface = NF_OUT_DROP;
2880 do_xlate_actions(in, n_in, &ctx);
2881 remove_pop_action(&ctx);
2883      /* Check with in-band control to see if we're allowed to set up this
      * flow. */
2885 if (!in_band_rule_check(ofproto->in_band, flow, out)) {
2886 ctx.may_set_up_flow = false;
2889 if (may_set_up_flow) {
2890 *may_set_up_flow = ctx.may_set_up_flow;
2892 if (nf_output_iface) {
2893 *nf_output_iface = ctx.nf_output_iface;
2895 if (odp_actions_overflow(out)) {
2896 COVERAGE_INC(odp_overflow);
2897 odp_actions_init(out);
2898 return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
2903 /* Checks whether 'ofconn' is a slave controller. If so, returns an OpenFlow
2904 * error message code (composed with ofp_mkerr()) for the caller to propagate
2905 * upward. Otherwise, returns 0.
2907 * The log message mentions 'msg_type'. */
2909 reject_slave_controller(struct ofconn *ofconn, const char *msg_type)
2911 if (ofconn->type == OFCONN_PRIMARY && ofconn->role == NX_ROLE_SLAVE) {
2912 static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5);
2913 VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller",
2916 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
2923 handle_packet_out(struct ofconn *ofconn, struct ofp_header *oh)
2925 struct ofproto *p = ofconn->ofproto;
2926 struct ofp_packet_out *opo;
2927 struct ofpbuf payload, *buffer;
2928 union ofp_action *ofp_actions;
2929 struct odp_actions odp_actions;
2930 struct ofpbuf request;
2932 size_t n_ofp_actions;
2936 COVERAGE_INC(ofproto_packet_out);
2938 error = reject_slave_controller(ofconn, "OFPT_PACKET_OUT");
2943 /* Get ofp_packet_out. */
2945 request.size = ntohs(oh->length);
2946 opo = ofpbuf_try_pull(&request, offsetof(struct ofp_packet_out, actions));
2948 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
2952 error = ofputil_pull_actions(&request, ntohs(opo->actions_len),
2953 &ofp_actions, &n_ofp_actions);
2959 if (opo->buffer_id != htonl(UINT32_MAX)) {
2960 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
2962 if (error || !buffer) {
2971 /* Extract flow, check actions. */
2972 flow_extract(&payload, 0, ofp_port_to_odp_port(ntohs(opo->in_port)),
2974 error = validate_actions(ofp_actions, n_ofp_actions, &flow, p->max_ports);
2980 error = xlate_actions(ofp_actions, n_ofp_actions, &flow, p, &payload,
2981 &odp_actions, NULL, NULL, NULL);
2983 dpif_execute(p->dpif, odp_actions.actions, odp_actions.n_actions,
2988 ofpbuf_delete(buffer);
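/* Applies the OFPPC_* bits selected by 'mask' in 'config' to 'port':
 * OFPPC_PORT_DOWN brings the underlying netdev up or down, bits that affect
 * forwarding schedule flow revalidation, and OFPPC_NO_PACKET_IN is simply
 * recorded in the port's configuration. */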
2993 update_port_config(struct ofproto *p, struct ofport *port,
2994 uint32_t config, uint32_t mask)
2996 mask &= config ^ port->opp.config;
2997 if (mask & OFPPC_PORT_DOWN) {
2998 if (config & OFPPC_PORT_DOWN) {
2999 netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
3001 netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
3004 #define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | \
3005 OFPPC_NO_FWD | OFPPC_NO_FLOOD)
3006 if (mask & REVALIDATE_BITS) {
3007 COVERAGE_INC(ofproto_costly_flags);
3008 port->opp.config ^= mask & REVALIDATE_BITS;
3009 p->need_revalidate = true;
3011 #undef REVALIDATE_BITS
3012 if (mask & OFPPC_NO_PACKET_IN) {
3013 port->opp.config ^= OFPPC_NO_PACKET_IN;
3018 handle_port_mod(struct ofconn *ofconn, struct ofp_header *oh)
3020 struct ofproto *p = ofconn->ofproto;
3021 const struct ofp_port_mod *opm;
3022 struct ofport *port;
3025 error = reject_slave_controller(ofconn, "OFPT_PORT_MOD");
3029 error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm);
3033 opm = (struct ofp_port_mod *) oh;
3035 port = get_port(p, ofp_port_to_odp_port(ntohs(opm->port_no)));
3037 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
3038 } else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
3039 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
3041 update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
3042 if (opm->advertise) {
3043 netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
3049 static struct ofpbuf *
3050 make_ofp_stats_reply(ovs_be32 xid, ovs_be16 type, size_t body_len)
3052 struct ofp_stats_reply *osr;
3055 msg = ofpbuf_new(MIN(sizeof *osr + body_len, UINT16_MAX));
3056 osr = put_openflow_xid(sizeof *osr, OFPT_STATS_REPLY, xid, msg);
3058 osr->flags = htons(0);
3062 static struct ofpbuf *
3063 start_ofp_stats_reply(const struct ofp_stats_request *request, size_t body_len)
3065 return make_ofp_stats_reply(request->header.xid, request->type, body_len);
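/* Makes room for 'nbytes' more bytes of body in the stats reply in '*msgp'.
 * If that would overflow the 16-bit OpenFlow message length, marks the
 * current reply with OFPSF_REPLY_MORE, transmits it, and starts a fresh reply
 * in '*msgp'.  Returns a pointer to the appended, uninitialized space. */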
3069 append_ofp_stats_reply(size_t nbytes, struct ofconn *ofconn,
3070 struct ofpbuf **msgp)
3072 struct ofpbuf *msg = *msgp;
3073 assert(nbytes <= UINT16_MAX - sizeof(struct ofp_stats_reply));
3074 if (nbytes + msg->size > UINT16_MAX) {
3075 struct ofp_stats_reply *reply = msg->data;
3076 reply->flags = htons(OFPSF_REPLY_MORE);
3077 *msgp = make_ofp_stats_reply(reply->header.xid, reply->type, nbytes);
3078 queue_tx(msg, ofconn, ofconn->reply_counter);
3080 return ofpbuf_put_uninit(*msgp, nbytes);
3083 static struct ofpbuf *
3084 make_nxstats_reply(ovs_be32 xid, ovs_be32 subtype, size_t body_len)
3086 struct nicira_stats_msg *nsm;
3089 msg = ofpbuf_new(MIN(sizeof *nsm + body_len, UINT16_MAX));
3090 nsm = put_openflow_xid(sizeof *nsm, OFPT_STATS_REPLY, xid, msg);
3091 nsm->type = htons(OFPST_VENDOR);
3092 nsm->flags = htons(0);
3093 nsm->vendor = htonl(NX_VENDOR_ID);
3094 nsm->subtype = htonl(subtype);
3098 static struct ofpbuf *
3099 start_nxstats_reply(const struct nicira_stats_msg *request, size_t body_len)
3101 return make_nxstats_reply(request->header.xid, request->subtype, body_len);
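/* Nicira-extension counterpart of append_ofp_stats_reply().  Note the
 * asymmetry: this variant only preallocates tailroom in '*msgp'; the caller
 * appends the data itself. */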
3105 append_nxstats_reply(size_t nbytes, struct ofconn *ofconn,
3106 struct ofpbuf **msgp)
3108 struct ofpbuf *msg = *msgp;
3109 assert(nbytes <= UINT16_MAX - sizeof(struct nicira_stats_msg));
3110 if (nbytes + msg->size > UINT16_MAX) {
3111 struct nicira_stats_msg *reply = msg->data;
3112 reply->flags = htons(OFPSF_REPLY_MORE);
3113 *msgp = make_nxstats_reply(reply->header.xid, reply->subtype, nbytes);
3114 queue_tx(msg, ofconn, ofconn->reply_counter);
3116 ofpbuf_prealloc_tailroom(*msgp, nbytes);
3120 handle_desc_stats_request(struct ofconn *ofconn,
3121 struct ofp_stats_request *request)
3123 struct ofproto *p = ofconn->ofproto;
3124 struct ofp_desc_stats *ods;
3127 msg = start_ofp_stats_reply(request, sizeof *ods);
3128 ods = append_ofp_stats_reply(sizeof *ods, ofconn, &msg);
3129 memset(ods, 0, sizeof *ods);
3130 ovs_strlcpy(ods->mfr_desc, p->mfr_desc, sizeof ods->mfr_desc);
3131 ovs_strlcpy(ods->hw_desc, p->hw_desc, sizeof ods->hw_desc);
3132 ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc);
3133 ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num);
3134 ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc);
3135 queue_tx(msg, ofconn, ofconn->reply_counter);
3141 handle_table_stats_request(struct ofconn *ofconn,
3142 struct ofp_stats_request *request)
3144 struct ofproto *p = ofconn->ofproto;
3145 struct ofp_table_stats *ots;
3150 msg = start_ofp_stats_reply(request, sizeof *ots * 2);
3152 /* Count rules other than subrules. */
3153 n_rules = classifier_count(&p->cls);
3154 CLASSIFIER_FOR_EACH_EXACT_RULE (rule, cr, &p->cls) {
3160 /* Classifier table. */
3161 ots = append_ofp_stats_reply(sizeof *ots, ofconn, &msg);
3162 memset(ots, 0, sizeof *ots);
3163 strcpy(ots->name, "classifier");
3164 ots->wildcards = (ofconn->flow_format == NXFF_OPENFLOW10
3165 ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL));
3166 ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. */
3167 ots->active_count = htonl(n_rules);
3168 ots->lookup_count = htonll(0); /* XXX */
3169 ots->matched_count = htonll(0); /* XXX */
3171 queue_tx(msg, ofconn, ofconn->reply_counter);
3176 append_port_stat(struct ofport *port, struct ofconn *ofconn,
3177 struct ofpbuf **msgp)
3179 struct netdev_stats stats;
3180 struct ofp_port_stats *ops;
3182 /* Intentionally ignore return value, since errors will set
3183 * 'stats' to all-1s, which is correct for OpenFlow, and
3184 * netdev_get_stats() will log errors. */
3185 netdev_get_stats(port->netdev, &stats);
3187 ops = append_ofp_stats_reply(sizeof *ops, ofconn, msgp);
3188 ops->port_no = htons(port->opp.port_no);
3189 memset(ops->pad, 0, sizeof ops->pad);
3190 ops->rx_packets = htonll(stats.rx_packets);
3191 ops->tx_packets = htonll(stats.tx_packets);
3192 ops->rx_bytes = htonll(stats.rx_bytes);
3193 ops->tx_bytes = htonll(stats.tx_bytes);
3194 ops->rx_dropped = htonll(stats.rx_dropped);
3195 ops->tx_dropped = htonll(stats.tx_dropped);
3196 ops->rx_errors = htonll(stats.rx_errors);
3197 ops->tx_errors = htonll(stats.tx_errors);
3198 ops->rx_frame_err = htonll(stats.rx_frame_errors);
3199 ops->rx_over_err = htonll(stats.rx_over_errors);
3200 ops->rx_crc_err = htonll(stats.rx_crc_errors);
3201 ops->collisions = htonll(stats.collisions);
3205 handle_port_stats_request(struct ofconn *ofconn, struct ofp_stats_request *osr,
3208 struct ofproto *p = ofconn->ofproto;
3209 struct ofp_port_stats_request *psr;
3210 struct ofp_port_stats *ops;
3212 struct ofport *port;
3214 if (arg_size != sizeof *psr) {
3215 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3217 psr = (struct ofp_port_stats_request *) osr->body;
3219 msg = start_ofp_stats_reply(osr, sizeof *ops * 16);
3220 if (psr->port_no != htons(OFPP_NONE)) {
3221 port = get_port(p, ofp_port_to_odp_port(ntohs(psr->port_no)));
3223 append_port_stat(port, ofconn, &msg);
3226 HMAP_FOR_EACH (port, hmap_node, &p->ports) {
3227 append_port_stat(port, ofconn, &msg);
3231 queue_tx(msg, ofconn, ofconn->reply_counter);
3235 struct flow_stats_cbdata {
3236 struct ofconn *ofconn;
3241 /* Obtains statistic counters for 'rule' within 'p' and stores them into
3242 * '*packet_countp' and '*byte_countp'. If 'rule' is a wildcarded rule, the
3243  * returned statistics include statistics for all of 'rule''s subrules. */
3245 query_stats(struct ofproto *p, struct rule *rule,
3246 uint64_t *packet_countp, uint64_t *byte_countp)
3248 uint64_t packet_count, byte_count;
3249 struct rule *subrule;
3250 struct odp_flow *odp_flows;
3253 /* Start from historical data for 'rule' itself that are no longer tracked
3254      * by the datapath.  This counts, for example, subrules that have
      * expired. */
3256 packet_count = rule->packet_count;
3257 byte_count = rule->byte_count;
3259 /* Prepare to ask the datapath for statistics on 'rule', or if it is
3260 * wildcarded then on all of its subrules.
3262 * Also, add any statistics that are not tracked by the datapath for each
3263 * subrule. This includes, for example, statistics for packets that were
3264      * executed "by hand" by ofproto via dpif_execute() but must be accounted
      * to the rule. */
3266 n_odp_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1;
3267 odp_flows = xzalloc(n_odp_flows * sizeof *odp_flows);
3268 if (rule->cr.wc.wildcards) {
3270 LIST_FOR_EACH (subrule, list, &rule->list) {
3271 odp_flow_key_from_flow(&odp_flows[i++].key, &subrule->cr.flow);
3272 packet_count += subrule->packet_count;
3273 byte_count += subrule->byte_count;
3276 odp_flow_key_from_flow(&odp_flows[0].key, &rule->cr.flow);
3279 /* Fetch up-to-date statistics from the datapath and add them in. */
3280 if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) {
3282 for (i = 0; i < n_odp_flows; i++) {
3283 struct odp_flow *odp_flow = &odp_flows[i];
3284 packet_count += odp_flow->stats.n_packets;
3285 byte_count += odp_flow->stats.n_bytes;
3290 /* Return the stats to the caller. */
3291 *packet_countp = packet_count;
3292 *byte_countp = byte_count;
3296 calc_flow_duration(long long int start, ovs_be32 *sec, ovs_be32 *nsec)
3298 long long int msecs = time_msec() - start;
3299 *sec = htonl(msecs / 1000);
3300 *nsec = htonl((msecs % 1000) * (1000 * 1000));
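/* For example, calc_flow_duration() for a flow created 2,500 ms ago stores
 * htonl(2) in '*sec' and htonl(500000000) in '*nsec'. */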
3304 flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
3306 struct rule *rule = rule_from_cls_rule(rule_);
3307 struct flow_stats_cbdata *cbdata = cbdata_;
3308 struct ofp_flow_stats *ofs;
3309 uint64_t packet_count, byte_count;
3310 size_t act_len, len;
3312 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
3316 act_len = sizeof *rule->actions * rule->n_actions;
3317 len = offsetof(struct ofp_flow_stats, actions) + act_len;
3319 query_stats(cbdata->ofconn->ofproto, rule, &packet_count, &byte_count);
3321 ofs = append_ofp_stats_reply(len, cbdata->ofconn, &cbdata->msg);
3322 ofs->length = htons(len);
3325 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
3326 cbdata->ofconn->flow_format, &ofs->match);
3327 calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec);
3328 ofs->cookie = rule->flow_cookie;
3329 ofs->priority = htons(rule->cr.priority);
3330 ofs->idle_timeout = htons(rule->idle_timeout);
3331 ofs->hard_timeout = htons(rule->hard_timeout);
3332 memset(ofs->pad2, 0, sizeof ofs->pad2);
3333 ofs->packet_count = htonll(packet_count);
3334 ofs->byte_count = htonll(byte_count);
3335 if (rule->n_actions > 0) {
3336 memcpy(ofs->actions, rule->actions, act_len);
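/* This switch exposes a single flow table (the "classifier"), so a stats
 * request for table 0 or the special "all tables" id 0xff includes
 * everything, and any other table id matches nothing. */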
3341 table_id_to_include(uint8_t table_id)
3343 return table_id == 0 || table_id == 0xff ? CLS_INC_ALL : 0;
3347 handle_flow_stats_request(struct ofconn *ofconn,
3348 const struct ofp_stats_request *osr, size_t arg_size)
3350 struct ofp_flow_stats_request *fsr;
3351 struct flow_stats_cbdata cbdata;
3352 struct cls_rule target;
3354 if (arg_size != sizeof *fsr) {
3355 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3357 fsr = (struct ofp_flow_stats_request *) osr->body;
3359 COVERAGE_INC(ofproto_flows_req);
3360 cbdata.ofconn = ofconn;
3361 cbdata.out_port = fsr->out_port;
3362 cbdata.msg = start_ofp_stats_reply(osr, 1024);
3363 cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0, &target);
3364 classifier_for_each_match(&ofconn->ofproto->cls, &target,
3365 table_id_to_include(fsr->table_id),
3366 flow_stats_cb, &cbdata);
3367 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3372 nx_flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
3374 struct rule *rule = rule_from_cls_rule(rule_);
3375 struct flow_stats_cbdata *cbdata = cbdata_;
3376 struct nx_flow_stats *nfs;
3377 uint64_t packet_count, byte_count;
3378 size_t act_len, start_len;
3380 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
3384 query_stats(cbdata->ofconn->ofproto, rule, &packet_count, &byte_count);
3386 act_len = sizeof *rule->actions * rule->n_actions;
3388 start_len = cbdata->msg->size;
3389 append_nxstats_reply(sizeof *nfs + NXM_MAX_LEN + act_len,
3390 cbdata->ofconn, &cbdata->msg);
3391 nfs = ofpbuf_put_uninit(cbdata->msg, sizeof *nfs);
3394 calc_flow_duration(rule->created, &nfs->duration_sec, &nfs->duration_nsec);
3395 nfs->cookie = rule->flow_cookie;
3396 nfs->priority = htons(rule->cr.priority);
3397 nfs->idle_timeout = htons(rule->idle_timeout);
3398 nfs->hard_timeout = htons(rule->hard_timeout);
3399 nfs->match_len = htons(nx_put_match(cbdata->msg, &rule->cr));
3400 memset(nfs->pad2, 0, sizeof nfs->pad2);
3401 nfs->packet_count = htonll(packet_count);
3402 nfs->byte_count = htonll(byte_count);
3403 if (rule->n_actions > 0) {
3404 ofpbuf_put(cbdata->msg, rule->actions, act_len);
3406 nfs->length = htons(cbdata->msg->size - start_len);
3410 handle_nxst_flow(struct ofconn *ofconn, struct ofpbuf *b)
3412 struct nx_flow_stats_request *nfsr;
3413 struct flow_stats_cbdata cbdata;
3414 struct cls_rule target;
3417 /* Dissect the message. */
3418 nfsr = ofpbuf_try_pull(b, sizeof *nfsr);
3420 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3422 error = nx_pull_match(b, ntohs(nfsr->match_len), 0, &target);
3427 COVERAGE_INC(ofproto_flows_req);
3428 cbdata.ofconn = ofconn;
3429 cbdata.out_port = nfsr->out_port;
3430 cbdata.msg = start_nxstats_reply(&nfsr->nsm, 1024);
3431 classifier_for_each_match(&ofconn->ofproto->cls, &target,
3432 table_id_to_include(nfsr->table_id),
3433 nx_flow_stats_cb, &cbdata);
3434 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3438 struct flow_stats_ds_cbdata {
3439 struct ofproto *ofproto;
3444 flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
3446 struct rule *rule = rule_from_cls_rule(rule_);
3447 struct flow_stats_ds_cbdata *cbdata = cbdata_;
3448 struct ds *results = cbdata->results;
3449 struct ofp_match match;
3450 uint64_t packet_count, byte_count;
3451 size_t act_len = sizeof *rule->actions * rule->n_actions;
3453 /* Don't report on subrules. */
3454 if (rule->super != NULL) {
3458 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
3459 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
3460 NXFF_OPENFLOW10, &match);
3462 ds_put_format(results, "duration=%llds, ",
3463 (time_msec() - rule->created) / 1000);
3464 ds_put_format(results, "priority=%u, ", rule->cr.priority);
3465 ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
3466 ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
3467 ofp_print_match(results, &match, true);
3469 ofp_print_actions(results, &rule->actions->header, act_len);
3471 ds_put_cstr(results, "drop");
3473 ds_put_cstr(results, "\n");
3476 /* Adds a pretty-printed description of all flows to 'results', including
3477 * those marked hidden by secchan (e.g., by in-band control). */
3479 ofproto_get_all_flows(struct ofproto *p, struct ds *results)
3481 struct ofp_match match;
3482 struct cls_rule target;
3483 struct flow_stats_ds_cbdata cbdata;
3485 memset(&match, 0, sizeof match);
3486 match.wildcards = htonl(OVSFW_ALL);
3489 cbdata.results = results;
3491 cls_rule_from_match(&match, 0, NXFF_OPENFLOW10, 0, &target);
3492 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
3493 flow_stats_ds_cb, &cbdata);
3496 struct aggregate_stats_cbdata {
3497 struct ofproto *ofproto;
3499 uint64_t packet_count;
3500 uint64_t byte_count;
3505 aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
3507 struct rule *rule = rule_from_cls_rule(rule_);
3508 struct aggregate_stats_cbdata *cbdata = cbdata_;
3509 uint64_t packet_count, byte_count;
3511 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
3515 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
3517 cbdata->packet_count += packet_count;
3518 cbdata->byte_count += byte_count;
3523 query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target,
3524 ovs_be16 out_port, uint8_t table_id,
3525 struct ofp_aggregate_stats_reply *oasr)
3527 struct aggregate_stats_cbdata cbdata;
3529 COVERAGE_INC(ofproto_agg_request);
3530 cbdata.ofproto = ofproto;
3531 cbdata.out_port = out_port;
3532 cbdata.packet_count = 0;
3533 cbdata.byte_count = 0;
3535 classifier_for_each_match(&ofproto->cls, target,
3536 table_id_to_include(table_id),
3537 aggregate_stats_cb, &cbdata);
3539 oasr->flow_count = htonl(cbdata.n_flows);
3540 oasr->packet_count = htonll(cbdata.packet_count);
3541 oasr->byte_count = htonll(cbdata.byte_count);
3542 memset(oasr->pad, 0, sizeof oasr->pad);
3546 handle_aggregate_stats_request(struct ofconn *ofconn,
3547 const struct ofp_stats_request *osr,
3550 struct ofp_aggregate_stats_request *request;
3551 struct ofp_aggregate_stats_reply *reply;
3552 struct cls_rule target;
3555 if (arg_size != sizeof *request) {
3556 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3558 request = (struct ofp_aggregate_stats_request *) osr->body;
3560 cls_rule_from_match(&request->match, 0, NXFF_OPENFLOW10, 0, &target);
3562 msg = start_ofp_stats_reply(osr, sizeof *reply);
3563 reply = append_ofp_stats_reply(sizeof *reply, ofconn, &msg);
3564 query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
3565 request->table_id, reply);
3566 queue_tx(msg, ofconn, ofconn->reply_counter);
3571 handle_nxst_aggregate(struct ofconn *ofconn, struct ofpbuf *b)
3573 struct nx_aggregate_stats_request *request;
3574 struct ofp_aggregate_stats_reply *reply;
3575 struct cls_rule target;
3579 /* Dissect the message. */
3580 request = ofpbuf_try_pull(b, sizeof *request);
3582 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3584 error = nx_pull_match(b, ntohs(request->match_len), 0, &target);
3590 COVERAGE_INC(ofproto_flows_req);
3591 buf = start_nxstats_reply(&request->nsm, sizeof *reply);
3592 reply = ofpbuf_put_uninit(buf, sizeof *reply);
3593 query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
3594 request->table_id, reply);
3595 queue_tx(buf, ofconn, ofconn->reply_counter);
3600 struct queue_stats_cbdata {
3601 struct ofconn *ofconn;
3602 struct ofport *ofport;
3607 put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id,
3608 const struct netdev_queue_stats *stats)
3610 struct ofp_queue_stats *reply;
3612 reply = append_ofp_stats_reply(sizeof *reply, cbdata->ofconn, &cbdata->msg);
3613 reply->port_no = htons(cbdata->ofport->opp.port_no);
3614 memset(reply->pad, 0, sizeof reply->pad);
3615 reply->queue_id = htonl(queue_id);
3616 reply->tx_bytes = htonll(stats->tx_bytes);
3617 reply->tx_packets = htonll(stats->tx_packets);
3618 reply->tx_errors = htonll(stats->tx_errors);
3622 handle_queue_stats_dump_cb(uint32_t queue_id,
3623 struct netdev_queue_stats *stats,
3626 struct queue_stats_cbdata *cbdata = cbdata_;
3628 put_queue_stats(cbdata, queue_id, stats);
3632 handle_queue_stats_for_port(struct ofport *port, uint32_t queue_id,
3633 struct queue_stats_cbdata *cbdata)
3635 cbdata->ofport = port;
3636 if (queue_id == OFPQ_ALL) {
3637 netdev_dump_queue_stats(port->netdev,
3638 handle_queue_stats_dump_cb, cbdata);
3640 struct netdev_queue_stats stats;
3642 if (!netdev_get_queue_stats(port->netdev, queue_id, &stats)) {
3643 put_queue_stats(cbdata, queue_id, &stats);
3649 handle_queue_stats_request(struct ofconn *ofconn,
3650 const struct ofp_stats_request *osr,
3653 struct ofproto *ofproto = ofconn->ofproto;
3654 struct ofp_queue_stats_request *qsr;
3655 struct queue_stats_cbdata cbdata;
3656 struct ofport *port;
3657 unsigned int port_no;
3660 if (arg_size != sizeof *qsr) {
3661 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3663 qsr = (struct ofp_queue_stats_request *) osr->body;
3665 COVERAGE_INC(ofproto_queue_req);
3667 cbdata.ofconn = ofconn;
3668 cbdata.msg = start_ofp_stats_reply(osr, 128);
3670 port_no = ntohs(qsr->port_no);
3671 queue_id = ntohl(qsr->queue_id);
3672 if (port_no == OFPP_ALL) {
3673 HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
3674 handle_queue_stats_for_port(port, queue_id, &cbdata);
3676 } else if (port_no < ofproto->max_ports) {
3677 port = get_port(ofproto, ofp_port_to_odp_port(port_no));
3679 handle_queue_stats_for_port(port, queue_id, &cbdata);
3682 ofpbuf_delete(cbdata.msg);
3683 return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT);
3685 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3691 handle_vendor_stats_request(struct ofconn *ofconn,
3692 struct ofp_stats_request *osr, size_t arg_size)
3694 struct nicira_stats_msg *nsm;
3699 VLOG_WARN_RL(&rl, "truncated vendor stats request body");
3700 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3703 memcpy(&vendor, osr->body, sizeof vendor);
3704 if (vendor != htonl(NX_VENDOR_ID)) {
3705 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3708 if (ntohs(osr->header.length) < sizeof(struct nicira_stats_msg)) {
3709 VLOG_WARN_RL(&rl, "truncated Nicira stats request");
3710 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3713 nsm = (struct nicira_stats_msg *) osr;
3715 b.size = ntohs(nsm->header.length);
3716 switch (ntohl(nsm->subtype)) {
3718 return handle_nxst_flow(ofconn, &b);
3720 case NXST_AGGREGATE:
3721 return handle_nxst_aggregate(ofconn, &b);
3724 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3729 handle_stats_request(struct ofconn *ofconn, struct ofp_header *oh)
3731 struct ofp_stats_request *osr;
3735 error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
3740 osr = (struct ofp_stats_request *) oh;
3742 switch (ntohs(osr->type)) {
3744 return handle_desc_stats_request(ofconn, osr);
3747 return handle_flow_stats_request(ofconn, osr, arg_size);
3749 case OFPST_AGGREGATE:
3750 return handle_aggregate_stats_request(ofconn, osr, arg_size);
3753 return handle_table_stats_request(ofconn, osr);
3756 return handle_port_stats_request(ofconn, osr, arg_size);
3759 return handle_queue_stats_request(ofconn, osr, arg_size);
3762 return handle_vendor_stats_request(ofconn, osr, arg_size);
3765 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
3769 static long long int
3770 msec_from_nsec(uint64_t sec, uint32_t nsec)
3772 return !sec ? 0 : sec * 1000 + nsec / 1000000;
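/* For example, msec_from_nsec(3, 250000000) is 3250 ms.  A 'sec' of 0 means
 * the flow was never used and yields 0. */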
3776 update_time(struct ofproto *ofproto, struct rule *rule,
3777 const struct odp_flow_stats *stats)
3779 long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
3780 if (used > rule->used) {
3782 if (rule->super && used > rule->super->used) {
3783 rule->super->used = used;
3785 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
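/* Folds the datapath counters in 'stats' into 'rule': packet and byte counts
 * accumulate, the last-used time advances via update_time(), and the TCP
 * flags feed 'rule''s per-flow NetFlow state. */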
3790 update_stats(struct ofproto *ofproto, struct rule *rule,
3791 const struct odp_flow_stats *stats)
3793 if (stats->n_packets) {
3794 update_time(ofproto, rule, stats);
3795 rule->packet_count += stats->n_packets;
3796 rule->byte_count += stats->n_bytes;
3797 netflow_flow_update_flags(&rule->nf_flow, stats->tcp_flags);
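/* Decoded form of an OpenFlow or Nicira-extension flow_mod request, filled in
 * by handle_ofpt_flow_mod() and handle_nxt_flow_mod() and executed by
 * flow_mod_core(). */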
3805 uint16_t idle_timeout;
3806 uint16_t hard_timeout;
3810 union ofp_action *actions;
3814 /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
3815 * in which no matching flow already exists in the flow table.
3817  * Adds the flow specified by 'fm', with its 'fm->n_actions' actions, to
3818  * ofconn->ofproto's flow table.  Returns 0 on success or an
3819 * OpenFlow error code as encoded by ofp_mkerr() on failure.
3821  * 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
 * if any. */
3824 add_flow(struct ofconn *ofconn, struct flow_mod *fm)
3826 struct ofproto *p = ofconn->ofproto;
3827 struct ofpbuf *packet;
3832 if (fm->flags & OFPFF_CHECK_OVERLAP
3833 && classifier_rule_overlaps(&p->cls, &fm->cr)) {
3834 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
3837 rule = rule_create(p, NULL, fm->actions, fm->n_actions,
3838 fm->idle_timeout, fm->hard_timeout, fm->cookie,
3839 fm->flags & OFPFF_SEND_FLOW_REM);
3843 if (fm->buffer_id != UINT32_MAX) {
3844 error = pktbuf_retrieve(ofconn->pktbuf, fm->buffer_id,
3848 in_port = UINT16_MAX;
3851 rule_insert(p, rule, packet, in_port);
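/* Returns the rule in 'p''s flow table that exactly matches 'fm''s rule (same
 * match and priority), or a null pointer if there is none. */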
3855 static struct rule *
3856 find_flow_strict(struct ofproto *p, const struct flow_mod *fm)
3858 return rule_from_cls_rule(classifier_find_rule_exactly(&p->cls, &fm->cr));
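/* If 'buffer_id' identifies a packet buffered on 'ofconn', executes 'rule''s
 * actions on it; a 'buffer_id' of UINT32_MAX (no buffered packet) is a no-op.
 * Returns 0 on success, otherwise an OpenFlow error code from
 * pktbuf_retrieve(). */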
3862 send_buffered_packet(struct ofconn *ofconn,
3863 struct rule *rule, uint32_t buffer_id)
3865 struct ofpbuf *packet;
3870 if (buffer_id == UINT32_MAX) {
3874 error = pktbuf_retrieve(ofconn->pktbuf, buffer_id, &packet, &in_port);
3879 flow_extract(packet, 0, in_port, &flow);
3880 rule_execute(ofconn->ofproto, rule, packet, &flow);
3885 /* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */
3887 struct modify_flows_cbdata {
3888 struct ofproto *ofproto;
3889 const struct flow_mod *fm;
3893 static int modify_flow(struct ofproto *, const struct flow_mod *,
3895 static void modify_flows_cb(struct cls_rule *, void *cbdata_);
3897 /* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code as
3898 * encoded by ofp_mkerr() on failure.
3900  * 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
 * if any. */
3903 modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm)
3905 struct modify_flows_cbdata cbdata;
3907 cbdata.ofproto = ofconn->ofproto;
3909 cbdata.match = NULL;
3911 classifier_for_each_match(&ofconn->ofproto->cls, &fm->cr, CLS_INC_ALL,
3912 modify_flows_cb, &cbdata);
3914      /* This credits the packet to whichever flow happened to match last,
3915       * which is arbitrary.  It might be more correct to look up the flow
3916       * that the packet actually matches. */
3917 send_buffered_packet(ofconn, cbdata.match, fm->buffer_id);
3920 return add_flow(ofconn, fm);
3924 /* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error
3925 * code as encoded by ofp_mkerr() on failure.
3927  * 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
 * if any. */
3930 modify_flow_strict(struct ofconn *ofconn, struct flow_mod *fm)
3932 struct ofproto *p = ofconn->ofproto;
3933 struct rule *rule = find_flow_strict(p, fm);
3934 if (rule && !rule_is_hidden(rule)) {
3935 modify_flow(p, fm, rule);
3936 return send_buffered_packet(ofconn, rule, fm->buffer_id);
3938 return add_flow(ofconn, fm);
3942 /* Callback for modify_flows_loose(). */
3944 modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
3946 struct rule *rule = rule_from_cls_rule(rule_);
3947 struct modify_flows_cbdata *cbdata = cbdata_;
3949 if (!rule_is_hidden(rule)) {
3950 cbdata->match = rule;
3951 modify_flow(cbdata->ofproto, cbdata->fm, rule);
3955 /* Implements core of OFPFC_MODIFY and OFPFC_MODIFY_STRICT where 'rule' has
3956 * been identified as a flow in 'p''s flow table to be modified, by changing
3957  * the rule's actions to match those in 'fm' (an array of 'fm->n_actions'
3958  * ofp_action structures). */
3960 modify_flow(struct ofproto *p, const struct flow_mod *fm, struct rule *rule)
3962 size_t actions_len = fm->n_actions * sizeof *rule->actions;
3964 rule->flow_cookie = fm->cookie;
3966 /* If the actions are the same, do nothing. */
3967 if (fm->n_actions == rule->n_actions
3969 || !memcmp(fm->actions, rule->actions, actions_len))) {
3973 /* Replace actions. */
3974 free(rule->actions);
3975 rule->actions = fm->n_actions ? xmemdup(fm->actions, actions_len) : NULL;
3976 rule->n_actions = fm->n_actions;
3978 /* Make sure that the datapath gets updated properly. */
3979 if (rule->cr.wc.wildcards) {
3980 COVERAGE_INC(ofproto_mod_wc_flow);
3981 p->need_revalidate = true;
3983 rule_update_actions(p, rule);
3989 /* OFPFC_DELETE implementation. */
3991 struct delete_flows_cbdata {
3992 struct ofproto *ofproto;
3996 static void delete_flows_cb(struct cls_rule *, void *cbdata_);
3997 static void delete_flow(struct ofproto *, struct rule *, ovs_be16 out_port);
3999 /* Implements OFPFC_DELETE. */
4001 delete_flows_loose(struct ofproto *p, const struct flow_mod *fm)
4003 struct delete_flows_cbdata cbdata;
4006 cbdata.out_port = htons(fm->out_port);
4008 classifier_for_each_match(&p->cls, &fm->cr, CLS_INC_ALL,
4009 delete_flows_cb, &cbdata);
4012 /* Implements OFPFC_DELETE_STRICT. */
4014 delete_flow_strict(struct ofproto *p, struct flow_mod *fm)
4016 struct rule *rule = find_flow_strict(p, fm);
4018 delete_flow(p, rule, htons(fm->out_port));
4022 /* Callback for delete_flows_loose(). */
4024 delete_flows_cb(struct cls_rule *rule_, void *cbdata_)
4026 struct rule *rule = rule_from_cls_rule(rule_);
4027 struct delete_flows_cbdata *cbdata = cbdata_;
4029 delete_flow(cbdata->ofproto, rule, cbdata->out_port);
4032 /* Implements core of OFPFC_DELETE and OFPFC_DELETE_STRICT where 'rule' has
4033 * been identified as a flow to delete from 'p''s flow table, by deleting the
4034  * flow and sending out an OFPT_FLOW_REMOVED message to any interested
4037 * Will not delete 'rule' if it is hidden. Will delete 'rule' only if
4038 * 'out_port' is htons(OFPP_NONE) or if 'rule' actually outputs to the
4039 * specified 'out_port'. */
4041 delete_flow(struct ofproto *p, struct rule *rule, ovs_be16 out_port)
4043 if (rule_is_hidden(rule)) {
4047 if (out_port != htons(OFPP_NONE) && !rule_has_out_port(rule, out_port)) {
4051 send_flow_removed(p, rule, OFPRR_DELETE);
4052 rule_remove(p, rule);
4056 flow_mod_core(struct ofconn *ofconn, struct flow_mod *fm)
4058 struct ofproto *p = ofconn->ofproto;
4061 error = reject_slave_controller(ofconn, "flow_mod");
4066 error = validate_actions(fm->actions, fm->n_actions,
4067 &fm->cr.flow, p->max_ports);
4072 /* We do not support the emergency flow cache. It will hopefully
4073 * get dropped from OpenFlow in the near future. */
4074 if (fm->flags & OFPFF_EMERG) {
4075 /* There isn't a good fit for an error code, so just state that the
4076 * flow table is full. */
4077 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
4080 switch (fm->command) {
4082 return add_flow(ofconn, fm);
4085 return modify_flows_loose(ofconn, fm);
4087 case OFPFC_MODIFY_STRICT:
4088 return modify_flow_strict(ofconn, fm);
4091 delete_flows_loose(p, fm);
4094 case OFPFC_DELETE_STRICT:
4095 delete_flow_strict(p, fm);
4099 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND);
4104 handle_ofpt_flow_mod(struct ofconn *ofconn, struct ofp_header *oh)
4106 struct ofp_match orig_match;
4107 struct ofp_flow_mod *ofm;
4113 b.size = ntohs(oh->length);
4115 /* Dissect the message. */
4116 ofm = ofpbuf_try_pull(&b, sizeof *ofm);
4118 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
4120 error = ofputil_pull_actions(&b, b.size, &fm.actions, &fm.n_actions);
4125 /* Normalize ofm->match. If normalization actually changes anything, then
4126 * log the differences. */
4127 ofm->match.pad1[0] = ofm->match.pad2[0] = 0;
4128 orig_match = ofm->match;
4129 normalize_match(&ofm->match);
4130 if (memcmp(&ofm->match, &orig_match, sizeof orig_match)) {
4131 static struct vlog_rate_limit normal_rl = VLOG_RATE_LIMIT_INIT(1, 1);
4132 if (!VLOG_DROP_INFO(&normal_rl)) {
4133 char *old = ofp_match_to_literal_string(&orig_match);
4134 char *new = ofp_match_to_literal_string(&ofm->match);
4135 VLOG_INFO("%s: normalization changed ofp_match, details:",
4136 rconn_get_name(ofconn->rconn));
4137 VLOG_INFO(" pre: %s", old);
4138 VLOG_INFO("post: %s", new);
4144 /* Translate the message. */
4145 cls_rule_from_match(&ofm->match, ntohs(ofm->priority), ofconn->flow_format,
4146 ofm->cookie, &fm.cr);
4147 fm.cookie = ofm->cookie;
4148 fm.command = ntohs(ofm->command);
4149 fm.idle_timeout = ntohs(ofm->idle_timeout);
4150 fm.hard_timeout = ntohs(ofm->hard_timeout);
4151 fm.buffer_id = ntohl(ofm->buffer_id);
4152 fm.out_port = ntohs(ofm->out_port);
4153 fm.flags = ntohs(ofm->flags);
4155 /* Execute the command. */
4156 return flow_mod_core(ofconn, &fm);
4160 handle_nxt_flow_mod(struct ofconn *ofconn, struct ofp_header *oh)
4162 struct nx_flow_mod *nfm;
4168 b.size = ntohs(oh->length);
4170 /* Dissect the message. */
4171 nfm = ofpbuf_try_pull(&b, sizeof *nfm);
4173 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
4175 error = nx_pull_match(&b, ntohs(nfm->match_len), ntohs(nfm->priority),
4180 error = ofputil_pull_actions(&b, b.size, &fm.actions, &fm.n_actions);
4185 /* Translate the message. */
4186 fm.cookie = nfm->cookie;
4187 fm.command = ntohs(nfm->command);
4188 fm.idle_timeout = ntohs(nfm->idle_timeout);
4189 fm.hard_timeout = ntohs(nfm->hard_timeout);
4190 fm.buffer_id = ntohl(nfm->buffer_id);
4191 fm.out_port = ntohs(nfm->out_port);
4192 fm.flags = ntohs(nfm->flags);
4194 /* Execute the command. */
4195 return flow_mod_core(ofconn, &fm);
4199 handle_tun_id_from_cookie(struct ofconn *ofconn, struct nxt_tun_id_cookie *msg)
4203 error = check_ofp_message(&msg->header, OFPT_VENDOR, sizeof *msg);
4208 ofconn->flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10;
4213 handle_role_request(struct ofconn *ofconn, struct nicira_header *msg)
4215 struct nx_role_request *nrr;
4216 struct nx_role_request *reply;
4220 if (ntohs(msg->header.length) != sizeof *nrr) {
4221 VLOG_WARN_RL(&rl, "received role request of length %u (expected %zu)",
4222 ntohs(msg->header.length), sizeof *nrr);
4223 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
4225 nrr = (struct nx_role_request *) msg;
4227 if (ofconn->type != OFCONN_PRIMARY) {
4228 VLOG_WARN_RL(&rl, "ignoring role request on non-controller "
4230 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
4233 role = ntohl(nrr->role);
4234 if (role != NX_ROLE_OTHER && role != NX_ROLE_MASTER
4235 && role != NX_ROLE_SLAVE) {
4236 VLOG_WARN_RL(&rl, "received request for unknown role %"PRIu32, role);
4238 /* There's no good error code for this. */
4239 return ofp_mkerr(OFPET_BAD_REQUEST, -1);
4242 if (role == NX_ROLE_MASTER) {
4243 struct ofconn *other;
4245 HMAP_FOR_EACH (other, hmap_node, &ofconn->ofproto->controllers) {
4246 if (other->role == NX_ROLE_MASTER) {
4247 other->role = NX_ROLE_SLAVE;
4251 ofconn->role = role;
4253 reply = make_nxmsg_xid(sizeof *reply, NXT_ROLE_REPLY, msg->header.xid,
4255 reply->role = htonl(role);
4256 queue_tx(buf, ofconn, ofconn->reply_counter);
4262 handle_nxt_set_flow_format(struct ofconn *ofconn,
4263 struct nxt_set_flow_format *msg)
4268 error = check_ofp_message(&msg->header, OFPT_VENDOR, sizeof *msg);
4273 format = ntohl(msg->format);
4274 if (format == NXFF_OPENFLOW10
4275 || format == NXFF_TUN_ID_FROM_COOKIE
4276 || format == NXFF_NXM) {
4277 ofconn->flow_format = format;
4280 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
4285 handle_vendor(struct ofconn *ofconn, void *msg)
4287 struct ofproto *p = ofconn->ofproto;
4288 struct ofp_vendor_header *ovh = msg;
4289 struct nicira_header *nh;
4291 if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
4292 VLOG_WARN_RL(&rl, "received vendor message of length %u "
4293 "(expected at least %zu)",
4294 ntohs(ovh->header.length), sizeof(struct ofp_vendor_header));
4295 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
4297 if (ovh->vendor != htonl(NX_VENDOR_ID)) {
4298 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
4300 if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
4301 VLOG_WARN_RL(&rl, "received Nicira vendor message of length %u "
4302 "(expected at least %zu)",
4303 ntohs(ovh->header.length), sizeof(struct nicira_header));
4304 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
4308 switch (ntohl(nh->subtype)) {
4309 case NXT_STATUS_REQUEST:
4310 return switch_status_handle_request(p->switch_status, ofconn->rconn,
4313 case NXT_TUN_ID_FROM_COOKIE:
4314 return handle_tun_id_from_cookie(ofconn, msg);
4316 case NXT_ROLE_REQUEST:
4317 return handle_role_request(ofconn, msg);
4319 case NXT_SET_FLOW_FORMAT:
4320 return handle_nxt_set_flow_format(ofconn, msg);
4323 return handle_nxt_flow_mod(ofconn, &ovh->header);
4326 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
4330 handle_barrier_request(struct ofconn *ofconn, struct ofp_header *oh)
4332 struct ofp_header *ob;
4335 /* Currently, everything executes synchronously, so we can just
4336 * immediately send the barrier reply. */
4337 ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf);
4338 queue_tx(buf, ofconn, ofconn->reply_counter);
4343 handle_openflow(struct ofconn *ofconn, struct ofpbuf *ofp_msg)
4345 struct ofp_header *oh = ofp_msg->data;
4348 COVERAGE_INC(ofproto_recv_openflow);
4350 case OFPT_ECHO_REQUEST:
4351 error = handle_echo_request(ofconn, oh);
4354 case OFPT_ECHO_REPLY:
4358 case OFPT_FEATURES_REQUEST:
4359 error = handle_features_request(ofconn, oh);
4362 case OFPT_GET_CONFIG_REQUEST:
4363 error = handle_get_config_request(ofconn, oh);
4366 case OFPT_SET_CONFIG:
4367 error = handle_set_config(ofconn, ofp_msg->data);
4370 case OFPT_PACKET_OUT:
4371 error = handle_packet_out(ofconn, ofp_msg->data);
4375 error = handle_port_mod(ofconn, oh);
4379 error = handle_ofpt_flow_mod(ofconn, ofp_msg->data);
4382 case OFPT_STATS_REQUEST:
4383 error = handle_stats_request(ofconn, oh);
4387 error = handle_vendor(ofconn, ofp_msg->data);
4390 case OFPT_BARRIER_REQUEST:
4391 error = handle_barrier_request(ofconn, oh);
4395          if (VLOG_IS_DBG_ENABLED()) {
4396 char *s = ofp_to_string(oh, ntohs(oh->length), 2);
4397 VLOG_DBG_RL(&rl, "OpenFlow message ignored: %s", s);
4400 error = ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE);
4405 send_error_oh(ofconn, ofp_msg->data, error);
4410 handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet)
4412 struct odp_msg *msg = packet->data;
4414 struct ofpbuf payload;
4417 payload.data = msg + 1;
4418 payload.size = msg->length - sizeof *msg;
4419 flow_extract(&payload, msg->arg, msg->port, &flow);
4421 /* Check with in-band control to see if this packet should be sent
4422 * to the local port regardless of the flow table. */
4423 if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
4424 union odp_action action;
4426 memset(&action, 0, sizeof(action));
4427 action.output.type = ODPAT_OUTPUT;
4428 action.output.port = ODPP_LOCAL;
4429 dpif_execute(p->dpif, &action, 1, &payload);
4432 rule = lookup_valid_rule(p, &flow);
4434 /* Don't send a packet-in if OFPPC_NO_PACKET_IN is asserted. */
4435 struct ofport *port = get_port(p, msg->port);
4437 if (port->opp.config & OFPPC_NO_PACKET_IN) {
4438 COVERAGE_INC(ofproto_no_packet_in);
4439 /* XXX install 'drop' flow entry */
4440 ofpbuf_delete(packet);
4444 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, msg->port);
4447 COVERAGE_INC(ofproto_packet_in);
4448 send_packet_in(p, packet);
4452 if (rule->cr.wc.wildcards) {
4453 rule = rule_create_subrule(p, rule, &flow);
4454 rule_make_actions(p, rule, packet);
4456 if (!rule->may_install) {
4457 /* The rule is not installable, that is, we need to process every
4458 * packet, so process the current packet and set its actions into 'rule'. */
4460 rule_make_actions(p, rule, packet);
4462 /* XXX revalidate rule if it needs it */
4466 if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY) {
4468 * Extra-special case for fail-open mode.
4470 * We are in fail-open mode and the packet matched the fail-open rule,
4471 * but we are connected to a controller too. We should send the packet
4472 * up to the controller in the hope that it will try to set up a flow
4473 * and thereby allow us to exit fail-open.
4475 * See the top-level comment in fail-open.c for more information.
4477 send_packet_in(p, ofpbuf_clone_with_headroom(packet,
4478 DPIF_RECV_MSG_PADDING));
4481 ofpbuf_pull(packet, sizeof *msg);
4482 rule_execute(p, rule, packet, &flow);
4483 rule_reinstall(p, rule);
4487 handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
4489 struct odp_msg *msg = packet->data;
4491 switch (msg->type) {
4492 case _ODPL_ACTION_NR:
4493 COVERAGE_INC(ofproto_ctlr_action);
4494 send_packet_in(p, packet);
4497 case _ODPL_SFLOW_NR:
4499 ofproto_sflow_received(p->sflow, msg);
4501 ofpbuf_delete(packet);
4505 handle_odp_miss_msg(p, packet);
4509 VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
4515 /* Flow expiration. */
4517 struct expire_cbdata {
4518 struct ofproto *ofproto;
4522 static int ofproto_dp_max_idle(const struct ofproto *);
4523 static void ofproto_update_used(struct ofproto *);
4524 static void rule_expire(struct cls_rule *, void *cbdata);
4526 /* This function is called periodically by ofproto_run(). Its job is to
4527 * collect updates for the flows that have been installed into the datapath,
4528 * most importantly when they were last used, and then use that information to
4529 * expire flows that have not been used recently.
4531 * Returns the number of milliseconds after which it should be called again. */
4533 ofproto_expire(struct ofproto *ofproto)
4535 struct expire_cbdata cbdata;
4537 /* Update 'used' for each flow in the datapath. */
4538 ofproto_update_used(ofproto);
4540 /* Expire idle flows.
4542 * A wildcarded flow is idle only when all of its subrules have expired due
4543 * to becoming idle, so iterate through the exact-match flows first. */
4544 cbdata.ofproto = ofproto;
4545 cbdata.dp_max_idle = ofproto_dp_max_idle(ofproto);
4546 classifier_for_each(&ofproto->cls, CLS_INC_EXACT, rule_expire, &cbdata);
4547 classifier_for_each(&ofproto->cls, CLS_INC_WILD, rule_expire, &cbdata);
4549 /* Let the hook know that we're at a stable point: all outstanding data
4550 * in existing flows has been accounted to the account_cb. Thus, the
4551 * hook can now reasonably do operations that depend on having accurate
4552 * flow volume accounting (currently, that's just bond rebalancing). */
4553 if (ofproto->ofhooks->account_checkpoint_cb) {
4554 ofproto->ofhooks->account_checkpoint_cb(ofproto->aux);
4557 return MIN(cbdata.dp_max_idle, 1000);
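#if 0
/* Illustrative sketch only, not part of ofproto: one plausible way a caller
 * could use ofproto_expire()'s return value to drive a poll loop, assuming the
 * poll_timer_wait() and poll_block() helpers from poll-loop.h. The real
 * periodic caller is ofproto_run(); 'example_expiration_loop' is a made-up
 * name used purely for illustration. */
static void
example_expiration_loop(struct ofproto *ofproto)
{
    for (;;) {
        /* Expire idle flows and learn how long to sleep before doing it
         * again. */
        int delay_ms = ofproto_expire(ofproto);

        /* Sleep until the next expiration round is due (or another event
         * wakes the poll loop). */
        poll_timer_wait(delay_ms);
        poll_block();
    }
}
#endif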
4560 /* Update 'used' member of each flow currently installed into the datapath. */
4562 ofproto_update_used(struct ofproto *p)
4564 struct odp_flow *flows;
4569 error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
4574 for (i = 0; i < n_flows; i++) {
4575 struct odp_flow *f = &flows[i];
4576 struct cls_rule target;
4580 odp_flow_key_to_flow(&f->key, &flow);
4581 cls_rule_init_exact(&flow, UINT16_MAX, &target);
4583 rule = rule_from_cls_rule(classifier_find_rule_exactly(&p->cls,
4586 if (rule && rule->installed) {
4587 update_time(p, rule, &f->stats);
4588 rule_account(p, rule, f->stats.n_bytes);
4590 /* There's a flow in the datapath that we know nothing about, so delete it. */
4592 COVERAGE_INC(ofproto_unexpected_rule);
4593 dpif_flow_del(p->dpif, f);
4600 /* Calculates and returns the number of milliseconds of idle time after which
4601 * flows should expire from the datapath and we should fold their statistics
4602 * into their parent rules in userspace. */
4604 ofproto_dp_max_idle(const struct ofproto *ofproto)
4607 * Idle time histogram.
4609 * Most of the time a switch has a relatively small number of flows. When
4610 * this is the case, we might as well keep statistics for all of them in
4611 * userspace and cache them in the kernel datapath for performance as well.
4614 * As the number of flows increases, the memory required to maintain
4615 * statistics about them in userspace and in the kernel becomes
4616 * significant. However, with a large number of flows it is likely that
4617 * only a few of them are "heavy hitters" that consume a large amount of
4618 * bandwidth. At this point, only heavy hitters are worth caching in the
4619 * kernel and maintaining in userspace; the other flows we can discard.
4621 * The technique used to compute the idle time is to build a histogram with
4622 * N_BUCKETS buckets, each BUCKET_WIDTH msecs wide. Each flow that
4623 * is installed in the kernel gets dropped into the appropriate bucket.
4624 * After the histogram has been built, we compute the cutoff so that only
4625 * the most-recently-used 1% of flows (but at least 1000 flows) are kept
4626 * cached. At least the most-recently-used bucket of flows is kept, so
4627 * actually an arbitrary number of flows can be kept in any given
4628 * expiration run (though the next run will delete most of those unless
4629 * they receive additional data).
4631 * This requires a second pass through the exact-match flows, in addition
4632 * to the pass made by ofproto_update_used(), because ofproto_update_used()
4633 * never looks at uninstallable flows. */
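/* Worked example with illustrative numbers: suppose BUCKET_WIDTH works out to
 * 100 ms and the classifier holds 20,000 exact-match flows, so the target is
 * MAX(1000, 20000 / 100) = 1000 flows (the "at least 1000" floor applies). A
 * flow idle for 230 ms lands in bucket 2. If bucket 0 holds 600 flows and
 * bucket 1 holds 500, the loop below accumulates 1100 >= 1000 and stops with
 * 'bucket' == 2, so the function returns 200 ms and flows idle for 200 ms or
 * more are expired from the datapath. */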
4635 enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
4636 enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
4637 int buckets[N_BUCKETS] = { 0 };
4643 total = classifier_count_exact(&ofproto->cls);
4644 if (total <= 1000) {
4645 return N_BUCKETS * BUCKET_WIDTH;
4648 /* Build histogram. */
4650 CLASSIFIER_FOR_EACH_EXACT_RULE (rule, cr, &ofproto->cls) {
4651 long long int idle = now - rule->used;
4652 int bucket = (idle <= 0 ? 0
4653 : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
4654 : (unsigned int) idle / BUCKET_WIDTH);
4658 /* Find the first bucket whose flows should be expired. */
4659 for (bucket = 0; bucket < N_BUCKETS; bucket++) {
4660 if (buckets[bucket]) {
4663 subtotal += buckets[bucket++];
4664 } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100));
4669 if (VLOG_IS_DBG_ENABLED()) {
4673 ds_put_cstr(&s, "keep");
4674 for (i = 0; i < N_BUCKETS; i++) {
4676 ds_put_cstr(&s, ", drop");
4679 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
4682 VLOG_INFO("%s: %s (msec:count)",
4683 dpif_name(ofproto->dpif), ds_cstr(&s));
4687 return bucket * BUCKET_WIDTH;
4691 rule_active_timeout(struct ofproto *ofproto, struct rule *rule)
4693 if (ofproto->netflow && !is_controller_rule(rule) &&
4694 netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
4695 struct ofexpired expired;
4696 struct odp_flow odp_flow;
4698 /* Get updated flow stats.
4700 * XXX We could avoid this call entirely if (1) ofproto_update_used()
4701 * updated TCP flags and (2) the dpif_flow_list_all() in
4702 * ofproto_update_used() zeroed TCP flags. */
4703 memset(&odp_flow, 0, sizeof odp_flow);
4704 if (rule->installed) {
4705 odp_flow_key_from_flow(&odp_flow.key, &rule->cr.flow);
4706 odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
4707 dpif_flow_get(ofproto->dpif, &odp_flow);
4709 if (odp_flow.stats.n_packets) {
4710 update_time(ofproto, rule, &odp_flow.stats);
4711 netflow_flow_update_flags(&rule->nf_flow,
4712 odp_flow.stats.tcp_flags);
4716 expired.flow = rule->cr.flow;
4717 expired.packet_count = rule->packet_count +
4718 odp_flow.stats.n_packets;
4719 expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
4720 expired.used = rule->used;
4722 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
4726 /* If 'cls_rule' is an OpenFlow rule that has expired according to OpenFlow
4727 * rules, then delete it entirely.
4729 * If 'cls_rule' is a subrule that has not been used recently, remove it from
4730 * the datapath and fold its statistics back into its super-rule.
4732 * (This is a callback function for classifier_for_each().) */
4734 rule_expire(struct cls_rule *cls_rule, void *cbdata_)
4736 struct expire_cbdata *cbdata = cbdata_;
4737 struct ofproto *ofproto = cbdata->ofproto;
4738 struct rule *rule = rule_from_cls_rule(cls_rule);
4739 long long int hard_expire, idle_expire, expire, now;
4741 /* Calculate OpenFlow expiration times for 'rule'. */
4742 hard_expire = (rule->hard_timeout
4743 ? rule->created + rule->hard_timeout * 1000
4745 idle_expire = (rule->idle_timeout
4746 && (rule->super || list_is_empty(&rule->list))
4747 ? rule->used + rule->idle_timeout * 1000
4749 expire = MIN(hard_expire, idle_expire);
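/* For example, a rule created at t = 0 with hard_timeout = 10 s and
 * idle_timeout = 5 s, last used at t = 7000 ms, gets hard_expire = 10000,
 * idle_expire = 12000, and therefore expires at t = 10000 ms. */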
4753 /* 'rule' has not expired according to OpenFlow rules. */
4754 if (!rule->cr.wc.wildcards) {
4755 if (now >= rule->used + cbdata->dp_max_idle) {
4756 /* This rule is idle, so drop it to free up resources. */
4758 /* It's not part of the OpenFlow flow table, so we can
4759 * delete it entirely and fold its statistics into its super-rule. */
4761 rule_remove(ofproto, rule);
4763 /* It is part of the OpenFlow flow table, so we have to
4764 * keep the rule but we can at least uninstall it from the datapath. */
4766 rule_uninstall(ofproto, rule);
4769 /* Send NetFlow active timeout if appropriate. */
4770 rule_active_timeout(cbdata->ofproto, rule);
4774 /* 'rule' has expired according to OpenFlow rules. */
4775 COVERAGE_INC(ofproto_expired);
4777 /* Update stats. (This is a no-op if the rule expired due to an idle
4778 * timeout, because that only happens when the rule has no subrules.) */
4780 if (rule->cr.wc.wildcards) {
4781 struct rule *subrule, *next;
4782 LIST_FOR_EACH_SAFE (subrule, next, list, &rule->list) {
4783 rule_remove(cbdata->ofproto, subrule);
4786 rule_uninstall(cbdata->ofproto, rule);
4789 /* Get rid of the rule. */
4790 if (!rule_is_hidden(rule)) {
4791 send_flow_removed(cbdata->ofproto, rule,
4793 ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
4795 rule_remove(cbdata->ofproto, rule);
4800 revalidate_cb(struct cls_rule *sub_, void *cbdata_)
4802 struct rule *sub = rule_from_cls_rule(sub_);
4803 struct revalidate_cbdata *cbdata = cbdata_;
4805 if (cbdata->revalidate_all
4806 || (cbdata->revalidate_subrules && sub->super)
4807 || (tag_set_intersects(&cbdata->revalidate_set, sub->tags))) {
4808 revalidate_rule(cbdata->ofproto, sub);
4813 revalidate_rule(struct ofproto *p, struct rule *rule)
4815 const struct flow *flow = &rule->cr.flow;
4817 COVERAGE_INC(ofproto_revalidate_rule);
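/* Look up the best-matching wildcarded rule for this subrule's flow. If the
 * lookup no longer returns the subrule's current super-rule, the flow table
 * has changed underneath it: the subrule is either removed outright or
 * re-parented under the new super-rule, inheriting its timeouts and creation
 * time, and its ODP actions are recomputed. */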
4820 super = rule_from_cls_rule(classifier_lookup(&p->cls, flow,
4823 rule_remove(p, rule);
4825 } else if (super != rule->super) {
4826 COVERAGE_INC(ofproto_revalidate_moved);
4827 list_remove(&rule->list);
4828 list_push_back(&super->list, &rule->list);
4829 rule->super = super;
4830 rule->hard_timeout = super->hard_timeout;
4831 rule->idle_timeout = super->idle_timeout;
4832 rule->created = rule->used = super->created;
4836 rule_update_actions(p, rule);
4840 static struct ofpbuf *
4841 compose_ofp_flow_removed(struct ofconn *ofconn, const struct rule *rule,
4844 struct ofp_flow_removed *ofr;
4847 ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
4848 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, ofconn->flow_format,
4850 ofr->cookie = rule->flow_cookie;
4851 ofr->priority = htons(rule->cr.priority);
4852 ofr->reason = reason;
4853 calc_flow_duration(rule->created, &ofr->duration_sec, &ofr->duration_nsec);
4854 ofr->idle_timeout = htons(rule->idle_timeout);
4855 ofr->packet_count = htonll(rule->packet_count);
4856 ofr->byte_count = htonll(rule->byte_count);
4861 static struct ofpbuf *
4862 compose_nx_flow_removed(const struct rule *rule, uint8_t reason)
4864 struct nx_flow_removed *nfr;
4868 nfr = make_nxmsg(sizeof *nfr, NXT_FLOW_REMOVED, &buf);
4870 match_len = nx_put_match(buf, &rule->cr);
4872 nfr->cookie = rule->flow_cookie;
4873 nfr->priority = htons(rule->cr.priority);
4874 nfr->reason = reason;
4875 calc_flow_duration(rule->created, &nfr->duration_sec, &nfr->duration_nsec);
4876 nfr->idle_timeout = htons(rule->idle_timeout);
4877 nfr->match_len = htons(match_len);
4878 nfr->packet_count = htonll(rule->packet_count);
4879 nfr->byte_count = htonll(rule->byte_count);
4885 send_flow_removed(struct ofproto *p, struct rule *rule, uint8_t reason)
4887 struct ofconn *ofconn;
4889 if (!rule->send_flow_removed) {
4893 LIST_FOR_EACH (ofconn, node, &p->all_conns) {
4896 if (!rconn_is_connected(ofconn->rconn)
4897 || !ofconn_receives_async_msgs(ofconn)) {
4901 msg = (ofconn->flow_format == NXFF_NXM
4902 ? compose_nx_flow_removed(rule, reason)
4903 : compose_ofp_flow_removed(ofconn, rule, reason));
4905 /* Account flow expirations under ofconn->reply_counter, the counter
4906 * for replies to OpenFlow requests. That works because preventing
4907 * OpenFlow requests from being processed also prevents new flows from
4908 * being added (and expiring). (It also prevents processing OpenFlow
4909 * requests that would not add new flows, so it is imperfect.) */
4910 queue_tx(msg, ofconn, ofconn->reply_counter);
4914 /* pinsched callback for sending 'packet' on 'ofconn'. */
4916 do_send_packet_in(struct ofpbuf *packet, void *ofconn_)
4918 struct ofconn *ofconn = ofconn_;
4920 rconn_send_with_limit(ofconn->rconn, packet,
4921 ofconn->packet_in_counter, 100);
4924 /* Takes 'packet', which has been converted with do_convert_to_packet_in(),
4925 * finalizes its content for sending on 'ofconn', and passes it to 'ofconn''s
4926 * packet scheduler for sending.
4928 * 'max_len' specifies the maximum number of bytes of the packet to send on
4929 * 'ofconn' (INT_MAX specifies no limit).
4931 * If 'clone' is true, the caller retains ownership of 'packet'. Otherwise,
4932 * ownership is transferred to this function. */
4934 schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, int max_len,
4937 struct ofproto *ofproto = ofconn->ofproto;
4938 struct ofp_packet_in *opi = packet->data;
4939 uint16_t in_port = ofp_port_to_odp_port(ntohs(opi->in_port));
4940 int send_len, trim_size;
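/* Choose a buffer ID. Packets sent to the controller because of an
 * output-to-controller action (OFPR_ACTION) are never buffered; in fail-open
 * mode a null buffer ID is assigned; if this connection has no packet buffer,
 * buffering is likewise skipped; otherwise the payload is saved in 'ofconn''s
 * packet buffer and the resulting ID is used. */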
4944 if (opi->reason == OFPR_ACTION) {
4945 buffer_id = UINT32_MAX;
4946 } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) {
4947 buffer_id = pktbuf_get_null();
4948 } else if (!ofconn->pktbuf) {
4949 buffer_id = UINT32_MAX;
4951 struct ofpbuf payload;
4952 payload.data = opi->data;
4953 payload.size = packet->size - offsetof(struct ofp_packet_in, data);
4954 buffer_id = pktbuf_save(ofconn->pktbuf, &payload, in_port);
4957 /* Figure out how much of the packet to send. */
4958 send_len = ntohs(opi->total_len);
4959 if (buffer_id != UINT32_MAX) {
4960 send_len = MIN(send_len, ofconn->miss_send_len);
4962 send_len = MIN(send_len, max_len);
4964 /* Adjust packet length and clone if necessary. */
4965 trim_size = offsetof(struct ofp_packet_in, data) + send_len;
4967 packet = ofpbuf_clone_data(packet->data, trim_size);
4970 packet->size = trim_size;
4973 /* Update packet headers. */
4974 opi->buffer_id = htonl(buffer_id);
4975 update_openflow_length(packet);
4977 /* Hand over to packet scheduler. It might immediately call into
4978 * do_send_packet_in() or it might buffer it for a while (until a later
4979 * call to pinsched_run()). */
4980 pinsched_send(ofconn->schedulers[opi->reason], in_port,
4981 packet, do_send_packet_in, ofconn);
4984 /* Replaces the struct odp_msg header in 'packet' with an equivalent struct
4985 * ofp_packet_in. 'packet' must have sufficient headroom to do so (e.g. as
4986 * returned by dpif_recv()).
4988 * The conversion is not complete: the caller still needs to trim any unneeded
4989 * payload off the end of the buffer, set the length in the OpenFlow header,
4990 * and set buffer_id. Those require us to know the controller settings and so
4991 * must be done on a per-controller basis.
4993 * Returns the maximum number of bytes of the packet that should be sent to
4994 * the controller (INT_MAX if no limit). */
4996 do_convert_to_packet_in(struct ofpbuf *packet)
4998 struct odp_msg *msg = packet->data;
4999 struct ofp_packet_in *opi;
5005 /* Extract relevant header fields */
5006 if (msg->type == _ODPL_ACTION_NR) {
5007 reason = OFPR_ACTION;
5010 reason = OFPR_NO_MATCH;
5013 total_len = msg->length - sizeof *msg;
5014 in_port = odp_port_to_ofp_port(msg->port);
5016 /* Repurpose packet buffer by overwriting header. */
5017 ofpbuf_pull(packet, sizeof(struct odp_msg));
5018 opi = ofpbuf_push_zeros(packet, offsetof(struct ofp_packet_in, data));
5019 opi->header.version = OFP_VERSION;
5020 opi->header.type = OFPT_PACKET_IN;
5021 opi->total_len = htons(total_len);
5022 opi->in_port = htons(in_port);
5023 opi->reason = reason;
5028 /* Given 'packet' containing an odp_msg of type _ODPL_ACTION_NR or
5029 * _ODPL_MISS_NR, sends an OFPT_PACKET_IN message to each OpenFlow controller
5030 * as necessary according to their individual configurations.
5032 * 'packet' must have sufficient headroom to convert it into a struct
5033 * ofp_packet_in (e.g. as returned by dpif_recv()).
5035 * Takes ownership of 'packet'. */
5037 send_packet_in(struct ofproto *ofproto, struct ofpbuf *packet)
5039 struct ofconn *ofconn, *prev;
5042 max_len = do_convert_to_packet_in(packet);
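/* Send to each controller that accepts asynchronous messages. 'prev' trails
 * the loop by one connection so that every matching controller except the
 * last receives a clone of 'packet'; the last one takes ownership of 'packet'
 * itself, and if no controller wants it the packet is simply freed. */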
5045 LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
5046 if (ofconn_receives_async_msgs(ofconn)) {
5048 schedule_packet_in(prev, packet, max_len, true);
5054 schedule_packet_in(prev, packet, max_len, false);
5056 ofpbuf_delete(packet);
5061 pick_datapath_id(const struct ofproto *ofproto)
5063 const struct ofport *port;
5065 port = get_port(ofproto, ODPP_LOCAL);
5067 uint8_t ea[ETH_ADDR_LEN];
5070 error = netdev_get_etheraddr(port->netdev, ea);
5072 return eth_addr_to_uint64(ea);
5074 VLOG_WARN("could not get MAC address for %s (%s)",
5075 netdev_get_name(port->netdev), strerror(error));
5077 return ofproto->fallback_dpid;
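/* Returns a fallback datapath ID derived from a randomly generated MAC
 * address (see eth_addr_nicira_random()), for use when pick_datapath_id()
 * cannot obtain the local port's MAC address. */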
5081 pick_fallback_dpid(void)
5083 uint8_t ea[ETH_ADDR_LEN];
5084 eth_addr_nicira_random(ea);
5085 return eth_addr_to_uint64(ea);
5089 default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet,
5090 struct odp_actions *actions, tag_type *tags,
5091 uint16_t *nf_output_iface, void *ofproto_)
5093 struct ofproto *ofproto = ofproto_;
5096 /* Drop frames for reserved multicast addresses. */
5097 if (eth_addr_is_reserved(flow->dl_dst)) {
5101 /* Learn source MAC (but don't try to learn from revalidation). */
5102 if (packet != NULL) {
5103 tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
5105 GRAT_ARP_LOCK_NONE);
5107 /* The log messages here could actually be useful in debugging,
5108 * so keep the rate limit relatively high. */
5109 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
5110 VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
5111 ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
5112 ofproto_revalidate(ofproto, rev_tag);
5116 /* Determine output port. */
5117 out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags,
5120 flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD,
5121 nf_output_iface, actions);
5122 } else if (out_port != flow->in_port) {
5123 odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
5124 *nf_output_iface = out_port;
5132 static const struct ofhooks default_ofhooks = {
5133 default_normal_ofhook_cb,