/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 * Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "ofproto.h"
#include <errno.h>
#include <inttypes.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "byte-order.h"
#include "classifier.h"
#include "discovery.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "mac-learning.h"
#include "ofp-print.h"
#include "ofproto-sflow.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openvswitch/datapath-protocol.h"
#include "poll-loop.h"
#include "stream-ssl.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(ofproto);

#include "sflow_api.h"

struct ofport {
    struct hmap_node hmap_node; /* In struct ofproto's "ports" hmap. */
    struct netdev *netdev;
    struct ofp_phy_port opp;    /* In host byte order. */
    uint16_t odp_port;
};

static void ofport_free(struct ofport *);
static void hton_ofp_phy_port(struct ofp_phy_port *);

static int xlate_actions(const union ofp_action *in, size_t n_in,
                         const struct flow *, struct ofproto *,
                         const struct ofpbuf *packet,
                         struct odp_actions *out, tag_type *tags,
                         bool *may_set_up_flow, uint16_t *nf_output_iface);

/* An OpenFlow flow. */
struct rule {
    long long int used;         /* Time last used; time created if not used. */
    long long int created;      /* Creation time. */

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any facet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    ovs_be64 flow_cookie;       /* Controller-issued identifier. */

    struct cls_rule cr;         /* In owning ofproto's classifier. */
    uint16_t idle_timeout;      /* In seconds from time of last use. */
    uint16_t hard_timeout;      /* In seconds from time of creation. */
    bool send_flow_removed;     /* Send a flow removed message? */
    int n_actions;              /* Number of elements in actions[]. */
    union ofp_action *actions;  /* OpenFlow actions. */
    struct list facets;         /* List of "struct facet"s. */
};
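
/* Illustrative note (not from the original file): because of the invariants
 * above, computing a rule's total packet count at any instant means combining
 * the rule's own folded-in history with each live facet's counters, roughly:
 *
 *     uint64_t packets = rule->packet_count;
 *     LIST_FOR_EACH (facet, list_node, &rule->facets) {
 *         packets += facet->packet_count;   // plus whatever the datapath
 *     }                                     // still holds, via dpif_flow_get()
 *
 * i.e. the rule's counters hold only history not available from any facet or
 * from the datapath. */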

static struct rule *rule_from_cls_rule(const struct cls_rule *);
static bool rule_is_hidden(const struct rule *);

static struct rule *rule_create(const struct cls_rule *,
                                const union ofp_action *, size_t n_actions,
                                uint16_t idle_timeout, uint16_t hard_timeout,
                                ovs_be64 flow_cookie, bool send_flow_removed);
static void rule_destroy(struct ofproto *, struct rule *);
static void rule_free(struct rule *);

static struct rule *rule_lookup(struct ofproto *, const struct flow *);
static void rule_insert(struct ofproto *, struct rule *);
static void rule_remove(struct ofproto *, struct rule *);

static void rule_send_removed(struct ofproto *, struct rule *, uint8_t reason);

/* An exact-match instantiation of an OpenFlow flow. */
struct facet {
    long long int used;         /* Time last used; time created if not used. */

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a flow was deleted (e.g. dpif_flow_del()) or when its
     *     statistics were reset (e.g. dpif_flow_put() with ODPPF_ZERO_STATS).
     *
     *   - Do not include any packets or bytes that can currently be obtained
     *     from the datapath by, e.g., dpif_flow_get().
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    /* Number of bytes passed to account_cb.  This may include bytes that can
     * currently be obtained from the datapath (thus, it can be greater than
     * byte_count). */
    uint64_t accounted_bytes;

    struct hmap_node hmap_node;  /* In owning ofproto's 'facets' hmap. */
    struct list list_node;       /* In owning rule's 'facets' list. */
    struct rule *rule;           /* Owning rule. */
    struct flow flow;            /* Exact-match flow. */
    bool installed;              /* Installed in datapath? */
    bool may_install;            /* True ordinarily; false if actions must
                                  * be reassessed for every packet. */
    int n_actions;               /* Number of elements in actions[]. */
    union odp_action *actions;   /* Datapath actions. */
    tag_type tags;               /* Tags (set only by hooks). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
};

static struct facet *facet_create(struct ofproto *, struct rule *,
                                  const struct flow *,
                                  const struct ofpbuf *packet);
static void facet_remove(struct ofproto *, struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_lookup_valid(struct ofproto *, const struct flow *);
static bool facet_revalidate(struct ofproto *, struct facet *);

static void facet_install(struct ofproto *, struct facet *, bool zero_stats);
static void facet_uninstall(struct ofproto *, struct facet *);
static void facet_flush_stats(struct ofproto *, struct facet *);

static void facet_make_actions(struct ofproto *, struct facet *,
                               const struct ofpbuf *packet);
static void facet_update_stats(struct ofproto *, struct facet *,
                               const struct odp_flow_stats *);

/* ofproto supports two kinds of OpenFlow connections:
 *
 *   - "Primary" connections to ordinary OpenFlow controllers.  ofproto
 *     maintains persistent connections to these controllers and by default
 *     sends them asynchronous messages such as packet-ins.
 *
 *   - "Service" connections, e.g. from ovs-ofctl.  When these connections
 *     drop, it is the other side's responsibility to reconnect them if
 *     necessary.  ofproto does not send them asynchronous messages by default.
 *
 * Currently, active (tcp, ssl, unix) connections are always "primary"
 * connections and passive (ptcp, pssl, punix) connections are always "service"
 * connections.  There is no inherent reason for this, but it reflects the
 * common case. */
enum ofconn_type {
    OFCONN_PRIMARY,             /* An ordinary OpenFlow controller. */
    OFCONN_SERVICE              /* A service connection, e.g. "ovs-ofctl". */
};
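
/* Illustrative examples (not from the original file; the exact target strings
 * are assumptions): a controller configured with a target such as
 * "tcp:10.0.0.1:6633" or "ssl:10.0.0.1:6633" yields an OFCONN_PRIMARY
 * connection, while connections accepted from a listener configured as
 * "ptcp:6633" or "punix:/var/run/mgmt.sock" become OFCONN_SERVICE. */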

/* A listener for incoming OpenFlow "service" connections. */
struct ofservice {
    struct hmap_node node;      /* In struct ofproto's "services" hmap. */
    struct pvconn *pvconn;      /* OpenFlow connection listener. */

    /* These are not used by ofservice directly.  They are settings for
     * accepted "struct ofconn"s from the pvconn. */
    int probe_interval;         /* Max idle time before probing, in seconds. */
    int rate_limit;             /* Max packet-in rate in packets per second. */
    int burst_limit;            /* Limit on accumulating packet credits. */
};

static struct ofservice *ofservice_lookup(struct ofproto *,
                                          const char *target);
static int ofservice_create(struct ofproto *,
                            const struct ofproto_controller *);
static void ofservice_reconfigure(struct ofservice *,
                                  const struct ofproto_controller *);
static void ofservice_destroy(struct ofproto *, struct ofservice *);

/* An OpenFlow connection. */
struct ofconn {
    struct ofproto *ofproto;    /* The ofproto that owns this connection. */
    struct list node;           /* In struct ofproto's "all_conns" list. */
    struct rconn *rconn;        /* OpenFlow connection. */
    enum ofconn_type type;      /* Type. */
    int flow_format;            /* One of NXFF_*. */

    /* OFPT_PACKET_IN related data. */
    struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
    struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */
    struct pktbuf *pktbuf;          /* OpenFlow packet buffers. */
    int miss_send_len;              /* Bytes to send of buffered packets. */

    /* Number of OpenFlow messages queued on 'rconn' as replies to OpenFlow
     * requests, and the maximum number before we stop reading OpenFlow
     * requests. */
#define OFCONN_REPLY_MAX 100
    struct rconn_packet_counter *reply_counter;

    /* type == OFCONN_PRIMARY only. */
    enum nx_role role;           /* Role. */
    struct hmap_node hmap_node;  /* In struct ofproto's "controllers" map. */
    struct discovery *discovery; /* Controller discovery object, if enabled. */
    struct status_category *ss;  /* Switch status category. */
    enum ofproto_band band;      /* In-band or out-of-band? */
};

/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's
 * "schedulers" array.  Their values are 0 and 1, and their meanings and values
 * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient.  In
 * case anything ever changes, check their values here. */
#define N_SCHEDULERS 2
BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0);
BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR);
BUILD_ASSERT_DECL(OFPR_ACTION == 1);
BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR);
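
/* Illustrative consequence (not from the original file; the dispatch shown is
 * a sketch, not the file's actual upcall code):
 *
 *     struct odp_msg *msg = packet->data;
 *     struct pinsched *ps = ofconn->schedulers[msg->type];
 *
 * works without any translation table because msg->type (_ODPL_MISS_NR or
 * _ODPL_ACTION_NR) doubles as both the OpenFlow packet-in reason and the
 * scheduler index. */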

static struct ofconn *ofconn_create(struct ofproto *, struct rconn *,
                                    enum ofconn_type);
static void ofconn_destroy(struct ofconn *);
static void ofconn_run(struct ofconn *);
static void ofconn_wait(struct ofconn *);
static bool ofconn_receives_async_msgs(const struct ofconn *);
static char *ofconn_make_name(const struct ofproto *, const char *target);
static void ofconn_set_rate_limit(struct ofconn *, int rate, int burst);

static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
                     struct rconn_packet_counter *counter);

static void send_packet_in(struct ofproto *, struct ofpbuf *odp_msg);
static void do_send_packet_in(struct ofpbuf *odp_msg, void *ofconn);

struct ofproto {
    /* Settings. */
    uint64_t datapath_id;       /* Datapath ID. */
    uint64_t fallback_dpid;     /* Datapath ID if no better choice found. */
    char *mfr_desc;             /* Manufacturer. */
    char *hw_desc;              /* Hardware. */
    char *sw_desc;              /* Software version. */
    char *serial_desc;          /* Serial number. */
    char *dp_desc;              /* Datapath description. */

    /* Datapath. */
    struct dpif *dpif;
    struct netdev_monitor *netdev_monitor;
    struct hmap ports;          /* Contains "struct ofport"s. */
    struct shash port_by_name;
    uint32_t max_ports;

    /* Configuration. */
    struct switch_status *switch_status;
    struct fail_open *fail_open;
    struct netflow *netflow;
    struct ofproto_sflow *sflow;

    /* In-band control. */
    struct in_band *in_band;
    long long int next_in_band_update;
    struct sockaddr_in *extra_in_band_remotes;
    size_t n_extra_remotes;

    /* Flow table. */
    struct classifier cls;
    long long int next_expiration;

    /* Facets. */
    struct hmap facets;
    bool need_revalidate;
    struct tag_set revalidate_set;

    /* OpenFlow connections. */
    struct hmap controllers;    /* Controller "struct ofconn"s. */
    struct list all_conns;      /* Contains "struct ofconn"s. */
    enum ofproto_fail_mode fail_mode;

    /* OpenFlow listeners. */
    struct hmap services;       /* Contains "struct ofservice"s. */
    struct pvconn **snoops;
    size_t n_snoops;

    /* Hooks for ovs-vswitchd. */
    const struct ofhooks *ofhooks;
    void *aux;

    /* Used by default ofhooks. */
    struct mac_learning *ml;
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static const struct ofhooks default_ofhooks;

static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);

static int ofproto_expire(struct ofproto *);

static void handle_odp_msg(struct ofproto *, struct ofpbuf *);

static void handle_openflow(struct ofconn *, struct ofpbuf *);

static struct ofport *get_port(const struct ofproto *, uint16_t odp_port);
static void update_port(struct ofproto *, const char *devname);
static int init_ports(struct ofproto *);
static void reinit_ports(struct ofproto *);

int
ofproto_create(const char *datapath, const char *datapath_type,
               const struct ofhooks *ofhooks, void *aux,
               struct ofproto **ofprotop)
{
    struct odp_stats stats;
    struct ofproto *p;
    struct dpif *dpif;
    int error;

    *ofprotop = NULL;

    /* Connect to datapath and start listening for messages. */
    error = dpif_open(datapath, datapath_type, &dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
        return error;
    }
    error = dpif_get_dp_stats(dpif, &stats);
    if (error) {
        VLOG_ERR("failed to obtain stats for datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    dpif_flow_flush(dpif);
    dpif_recv_purge(dpif);

    /* Initialize settings. */
    p = xzalloc(sizeof *p);
    p->fallback_dpid = pick_fallback_dpid();
    p->datapath_id = p->fallback_dpid;
    p->mfr_desc = xstrdup(DEFAULT_MFR_DESC);
    p->hw_desc = xstrdup(DEFAULT_HW_DESC);
    p->sw_desc = xstrdup(DEFAULT_SW_DESC);
    p->serial_desc = xstrdup(DEFAULT_SERIAL_DESC);
    p->dp_desc = xstrdup(DEFAULT_DP_DESC);

    /* Initialize datapath. */
    p->dpif = dpif;
    p->netdev_monitor = netdev_monitor_create();
    hmap_init(&p->ports);
    shash_init(&p->port_by_name);
    p->max_ports = stats.max_ports;

    /* Initialize submodules. */
    p->switch_status = switch_status_create(p);

    /* Initialize flow table. */
    classifier_init(&p->cls);
    p->next_expiration = time_msec() + 1000;

    /* Initialize facet table. */
    hmap_init(&p->facets);
    p->need_revalidate = false;
    tag_set_init(&p->revalidate_set);

    /* Initialize OpenFlow connections. */
    list_init(&p->all_conns);
    hmap_init(&p->controllers);
    hmap_init(&p->services);
    p->snoops = NULL;
    p->n_snoops = 0;

    /* Initialize hooks. */
    if (ofhooks) {
        p->ofhooks = ofhooks;
        p->aux = aux;
        p->ml = NULL;
    } else {
        p->ofhooks = &default_ofhooks;
        p->aux = p;
        p->ml = mac_learning_create();
    }

    /* Pick final datapath ID. */
    p->datapath_id = pick_datapath_id(p);
    VLOG_INFO("using datapath ID %016"PRIx64, p->datapath_id);

    *ofprotop = p;
    return 0;
}
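
/* Illustrative usage sketch (not from the original file; the datapath name
 * "dp0" and type "system" are assumptions):
 *
 *     struct ofproto *ofproto;
 *     int error = ofproto_create("dp0", "system", NULL, NULL, &ofproto);
 *     if (!error) {
 *         for (;;) {
 *             ofproto_run(ofproto);
 *             ofproto_wait(ofproto);
 *             poll_block();
 *         }
 *     }
 */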

void
ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
{
    uint64_t old_dpid = p->datapath_id;
    p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
    if (p->datapath_id != old_dpid) {
        VLOG_INFO("datapath ID changed to %016"PRIx64, p->datapath_id);

        /* Force all active connections to reconnect, since there is no way to
         * notify a controller that the datapath ID has changed. */
        ofproto_reconnect_controllers(p);
    }
}

static bool
is_discovery_controller(const struct ofproto_controller *c)
{
    return !strcmp(c->target, "discover");
}

static bool
is_in_band_controller(const struct ofproto_controller *c)
{
    return is_discovery_controller(c) || c->band == OFPROTO_IN_BAND;
}

/* Creates a new controller in 'ofproto'.  Some of the settings are initially
 * drawn from 'c', but update_controller() needs to be called later to finish
 * the new ofconn's configuration. */
static void
add_controller(struct ofproto *ofproto, const struct ofproto_controller *c)
{
    struct discovery *discovery;
    struct ofconn *ofconn;

    if (is_discovery_controller(c)) {
        int error = discovery_create(c->accept_re, c->update_resolv_conf,
                                     ofproto->dpif, ofproto->switch_status,
                                     &discovery);
        if (error) {
            return;
        }
    } else {
        discovery = NULL;
    }

    ofconn = ofconn_create(ofproto, rconn_create(5, 8), OFCONN_PRIMARY);
    ofconn->pktbuf = pktbuf_create();
    ofconn->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;

    ofconn->discovery = discovery;
    if (!discovery) {
        char *name = ofconn_make_name(ofproto, c->target);
        rconn_connect(ofconn->rconn, c->target, name);
        free(name);
    }
    hmap_insert(&ofproto->controllers, &ofconn->hmap_node,
                hash_string(c->target, 0));
}

/* Reconfigures 'ofconn' to match 'c'.  This function cannot update an ofconn's
 * target or turn discovery on or off (these are done by creating new ofconns
 * and deleting old ones), but it can update the rest of an ofconn's
 * settings. */
static void
update_controller(struct ofconn *ofconn, const struct ofproto_controller *c)
{
    int probe_interval;

    ofconn->band = (is_in_band_controller(c)
                    ? OFPROTO_IN_BAND : OFPROTO_OUT_OF_BAND);

    rconn_set_max_backoff(ofconn->rconn, c->max_backoff);

    probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0;
    rconn_set_probe_interval(ofconn->rconn, probe_interval);

    if (ofconn->discovery) {
        discovery_set_update_resolv_conf(ofconn->discovery,
                                         c->update_resolv_conf);
        discovery_set_accept_controller_re(ofconn->discovery, c->accept_re);
    }

    ofconn_set_rate_limit(ofconn, c->rate_limit, c->burst_limit);
}

static const char *
ofconn_get_target(const struct ofconn *ofconn)
{
    return ofconn->discovery ? "discover" : rconn_get_target(ofconn->rconn);
}

static struct ofconn *
find_controller_by_target(struct ofproto *ofproto, const char *target)
{
    struct ofconn *ofconn;

    HMAP_FOR_EACH_WITH_HASH (ofconn, hmap_node,
                             hash_string(target, 0), &ofproto->controllers) {
        if (!strcmp(ofconn_get_target(ofconn), target)) {
            return ofconn;
        }
    }
    return NULL;
}

static void
update_in_band_remotes(struct ofproto *ofproto)
{
    const struct ofconn *ofconn;
    struct sockaddr_in *addrs;
    size_t max_addrs, n_addrs;
    bool discovery;
    size_t i;

    /* Allocate enough memory for as many remotes as we could possibly have. */
    max_addrs = ofproto->n_extra_remotes + hmap_count(&ofproto->controllers);
    addrs = xmalloc(max_addrs * sizeof *addrs);
    n_addrs = 0;

    /* Add all the remotes. */
    discovery = false;
    HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) {
        struct sockaddr_in *sin = &addrs[n_addrs];

        if (ofconn->band == OFPROTO_OUT_OF_BAND) {
            continue;
        }

        sin->sin_addr.s_addr = rconn_get_remote_ip(ofconn->rconn);
        if (sin->sin_addr.s_addr) {
            sin->sin_port = rconn_get_remote_port(ofconn->rconn);
            n_addrs++;
        }
        if (ofconn->discovery) {
            discovery = true;
        }
    }
    for (i = 0; i < ofproto->n_extra_remotes; i++) {
        addrs[n_addrs++] = ofproto->extra_in_band_remotes[i];
    }

    /* Create or update or destroy in-band.
     *
     * Ordinarily we only enable in-band if there's at least one remote
     * address, but discovery needs the in-band rules for DHCP to be installed
     * even before we know any remote addresses. */
    if (n_addrs || discovery) {
        if (!ofproto->in_band) {
            in_band_create(ofproto, ofproto->dpif, ofproto->switch_status,
                           &ofproto->in_band);
        }
        if (ofproto->in_band) {
            in_band_set_remotes(ofproto->in_band, addrs, n_addrs);
        }
        ofproto->next_in_band_update = time_msec() + 1000;
    } else {
        in_band_destroy(ofproto->in_band);
        ofproto->in_band = NULL;
    }

    free(addrs);
}

static void
update_fail_open(struct ofproto *p)
{
    struct ofconn *ofconn;

    if (!hmap_is_empty(&p->controllers)
        && p->fail_mode == OFPROTO_FAIL_STANDALONE) {
        struct rconn **rconns;
        size_t n;

        if (!p->fail_open) {
            p->fail_open = fail_open_create(p, p->switch_status);
        }

        n = 0;
        rconns = xmalloc(hmap_count(&p->controllers) * sizeof *rconns);
        HMAP_FOR_EACH (ofconn, hmap_node, &p->controllers) {
            rconns[n++] = ofconn->rconn;
        }

        fail_open_set_controllers(p->fail_open, rconns, n);
        /* p->fail_open takes ownership of 'rconns'. */
    } else {
        fail_open_destroy(p->fail_open);
        p->fail_open = NULL;
    }
}

void
ofproto_set_controllers(struct ofproto *p,
                        const struct ofproto_controller *controllers,
                        size_t n_controllers)
{
    struct shash new_controllers;
    struct ofconn *ofconn, *next_ofconn;
    struct ofservice *ofservice, *next_ofservice;
    bool ss_exists;
    size_t i;

    /* Create newly configured controllers and services.
     * Create a name to ofproto_controller mapping in 'new_controllers'. */
    shash_init(&new_controllers);
    for (i = 0; i < n_controllers; i++) {
        const struct ofproto_controller *c = &controllers[i];

        if (!vconn_verify_name(c->target) || !strcmp(c->target, "discover")) {
            if (!find_controller_by_target(p, c->target)) {
                add_controller(p, c);
            }
        } else if (!pvconn_verify_name(c->target)) {
            if (!ofservice_lookup(p, c->target) && ofservice_create(p, c)) {
                continue;
            }
        } else {
            VLOG_WARN_RL(&rl, "%s: unsupported controller \"%s\"",
                         dpif_name(p->dpif), c->target);
            continue;
        }

        shash_add_once(&new_controllers, c->target, &controllers[i]);
    }

    /* Delete controllers that are no longer configured.
     * Update configuration of all now-existing controllers. */
    ss_exists = false;
    HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &p->controllers) {
        struct ofproto_controller *c;

        c = shash_find_data(&new_controllers, ofconn_get_target(ofconn));
        if (!c) {
            ofconn_destroy(ofconn);
        } else {
            update_controller(ofconn, c);
            if (ofconn->ss) {
                ss_exists = true;
            }
        }
    }

    /* Delete services that are no longer configured.
     * Update configuration of all now-existing services. */
    HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &p->services) {
        struct ofproto_controller *c;

        c = shash_find_data(&new_controllers,
                            pvconn_get_name(ofservice->pvconn));
        if (!c) {
            ofservice_destroy(p, ofservice);
        } else {
            ofservice_reconfigure(ofservice, c);
        }
    }

    shash_destroy(&new_controllers);

    update_in_band_remotes(p);
    update_fail_open(p);

    if (!hmap_is_empty(&p->controllers) && !ss_exists) {
        ofconn = CONTAINER_OF(hmap_first(&p->controllers),
                              struct ofconn, hmap_node);
        ofconn->ss = switch_status_register(p->switch_status, "remote",
                                            rconn_status_cb, ofconn->rconn);
    }
}

void
ofproto_set_fail_mode(struct ofproto *p, enum ofproto_fail_mode fail_mode)
{
    p->fail_mode = fail_mode;
    update_fail_open(p);
}

/* Drops the connections between 'ofproto' and all of its controllers, forcing
 * them to reconnect. */
void
ofproto_reconnect_controllers(struct ofproto *ofproto)
{
    struct ofconn *ofconn;

    LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
        rconn_reconnect(ofconn->rconn);
    }
}

static bool
any_extras_changed(const struct ofproto *ofproto,
                   const struct sockaddr_in *extras, size_t n)
{
    size_t i;

    if (n != ofproto->n_extra_remotes) {
        return true;
    }

    for (i = 0; i < n; i++) {
        const struct sockaddr_in *old = &ofproto->extra_in_band_remotes[i];
        const struct sockaddr_in *new = &extras[i];

        if (old->sin_addr.s_addr != new->sin_addr.s_addr ||
            old->sin_port != new->sin_port) {
            return true;
        }
    }

    return false;
}

/* Sets the 'n' TCP port addresses in 'extras' as ones to which 'ofproto''s
 * in-band control should guarantee access, in the same way that in-band
 * control guarantees access to OpenFlow controllers. */
void
ofproto_set_extra_in_band_remotes(struct ofproto *ofproto,
                                  const struct sockaddr_in *extras, size_t n)
{
    if (!any_extras_changed(ofproto, extras, n)) {
        return;
    }

    free(ofproto->extra_in_band_remotes);
    ofproto->n_extra_remotes = n;
    ofproto->extra_in_band_remotes = xmemdup(extras, n * sizeof *extras);

    update_in_band_remotes(ofproto);
}
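
/* Illustrative usage sketch (not from the original file; the address and port
 * values are assumptions, and inet_addr() needs <arpa/inet.h>):
 *
 *     struct sockaddr_in extras[1];
 *
 *     memset(extras, 0, sizeof extras);
 *     extras[0].sin_addr.s_addr = inet_addr("10.0.0.5");
 *     extras[0].sin_port = htons(6633);
 *     ofproto_set_extra_in_band_remotes(ofproto, extras, 1);
 */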

void
ofproto_set_desc(struct ofproto *p,
                 const char *mfr_desc, const char *hw_desc,
                 const char *sw_desc, const char *serial_desc,
                 const char *dp_desc)
{
    struct ofp_desc_stats *ods;

    if (mfr_desc) {
        if (strlen(mfr_desc) >= sizeof ods->mfr_desc) {
            VLOG_WARN("truncating mfr_desc, must be less than %zu characters",
                      sizeof ods->mfr_desc);
        }
        free(p->mfr_desc);
        p->mfr_desc = xstrdup(mfr_desc);
    }
    if (hw_desc) {
        if (strlen(hw_desc) >= sizeof ods->hw_desc) {
            VLOG_WARN("truncating hw_desc, must be less than %zu characters",
                      sizeof ods->hw_desc);
        }
        free(p->hw_desc);
        p->hw_desc = xstrdup(hw_desc);
    }
    if (sw_desc) {
        if (strlen(sw_desc) >= sizeof ods->sw_desc) {
            VLOG_WARN("truncating sw_desc, must be less than %zu characters",
                      sizeof ods->sw_desc);
        }
        free(p->sw_desc);
        p->sw_desc = xstrdup(sw_desc);
    }
    if (serial_desc) {
        if (strlen(serial_desc) >= sizeof ods->serial_num) {
            VLOG_WARN("truncating serial_desc, must be less than %zu "
                      "characters",
                      sizeof ods->serial_num);
        }
        free(p->serial_desc);
        p->serial_desc = xstrdup(serial_desc);
    }
    if (dp_desc) {
        if (strlen(dp_desc) >= sizeof ods->dp_desc) {
            VLOG_WARN("truncating dp_desc, must be less than %zu characters",
                      sizeof ods->dp_desc);
        }
        free(p->dp_desc);
        p->dp_desc = xstrdup(dp_desc);
    }
}

static int
set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
            const struct svec *svec)
{
    struct pvconn **pvconns = *pvconnsp;
    size_t n_pvconns = *n_pvconnsp;
    int retval = 0;
    size_t i;

    for (i = 0; i < n_pvconns; i++) {
        pvconn_close(pvconns[i]);
    }
    free(pvconns);

    pvconns = xmalloc(svec->n * sizeof *pvconns);
    n_pvconns = 0;
    for (i = 0; i < svec->n; i++) {
        const char *name = svec->names[i];
        struct pvconn *pvconn;
        int error;

        error = pvconn_open(name, &pvconn);
        if (!error) {
            pvconns[n_pvconns++] = pvconn;
        } else {
            VLOG_ERR("failed to listen on %s: %s", name, strerror(error));
            if (!retval) {
                retval = error;
            }
        }
    }

    *pvconnsp = pvconns;
    *n_pvconnsp = n_pvconns;

    return retval;
}

int
ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops)
{
    return set_pvconns(&ofproto->snoops, &ofproto->n_snoops, snoops);
}

int
ofproto_set_netflow(struct ofproto *ofproto,
                    const struct netflow_options *nf_options)
{
    if (nf_options && nf_options->collectors.n) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, nf_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

void
ofproto_set_sflow(struct ofproto *ofproto,
                  const struct ofproto_sflow_options *oso)
{
    struct ofproto_sflow *os = ofproto->sflow;
    if (oso) {
        if (!os) {
            struct ofport *ofport;

            os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
            HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) {
                ofproto_sflow_add_port(os, ofport->odp_port,
                                       netdev_get_name(ofport->netdev));
            }
        }
        ofproto_sflow_set_options(os, oso);
    } else {
        ofproto_sflow_destroy(os);
        ofproto->sflow = NULL;
    }
}

uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
    return ofproto->datapath_id;
}

bool
ofproto_has_primary_controller(const struct ofproto *ofproto)
{
    return !hmap_is_empty(&ofproto->controllers);
}

enum ofproto_fail_mode
ofproto_get_fail_mode(const struct ofproto *p)
{
    return p->fail_mode;
}

void
ofproto_get_snoops(const struct ofproto *ofproto, struct svec *snoops)
{
    size_t i;

    for (i = 0; i < ofproto->n_snoops; i++) {
        svec_add(snoops, pvconn_get_name(ofproto->snoops[i]));
    }
}

void
ofproto_destroy(struct ofproto *p)
{
    struct ofservice *ofservice, *next_ofservice;
    struct ofconn *ofconn, *next_ofconn;
    struct ofport *ofport, *next_ofport;
    size_t i;

    if (!p) {
        return;
    }

    /* Destroy fail-open and in-band early, since they touch the classifier. */
    fail_open_destroy(p->fail_open);
    p->fail_open = NULL;

    in_band_destroy(p->in_band);
    p->in_band = NULL;
    free(p->extra_in_band_remotes);

    ofproto_flush_flows(p);
    classifier_destroy(&p->cls);
    hmap_destroy(&p->facets);

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &p->all_conns) {
        ofconn_destroy(ofconn);
    }
    hmap_destroy(&p->controllers);

    dpif_close(p->dpif);
    netdev_monitor_destroy(p->netdev_monitor);
    HMAP_FOR_EACH_SAFE (ofport, next_ofport, hmap_node, &p->ports) {
        hmap_remove(&p->ports, &ofport->hmap_node);
        ofport_free(ofport);
    }
    shash_destroy(&p->port_by_name);

    switch_status_destroy(p->switch_status);
    netflow_destroy(p->netflow);
    ofproto_sflow_destroy(p->sflow);

    HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &p->services) {
        ofservice_destroy(p, ofservice);
    }
    hmap_destroy(&p->services);

    for (i = 0; i < p->n_snoops; i++) {
        pvconn_close(p->snoops[i]);
    }
    free(p->snoops);

    mac_learning_destroy(p->ml);

    free(p->mfr_desc);
    free(p->hw_desc);
    free(p->sw_desc);
    free(p->serial_desc);
    free(p->dp_desc);

    hmap_destroy(&p->ports);

    free(p);
}

int
ofproto_run(struct ofproto *p)
{
    int error = ofproto_run1(p);
    if (!error) {
        error = ofproto_run2(p, false);
    }
    return error;
}

static void
process_port_change(struct ofproto *ofproto, int error, char *devname)
{
    if (error == ENOBUFS) {
        reinit_ports(ofproto);
    } else if (!error) {
        update_port(ofproto, devname);
        free(devname);
    }
}

/* Returns a "preference level" for snooping 'ofconn'.  A higher return value
 * means that 'ofconn' is more interesting for monitoring than a lower return
 * value. */
static int
snoop_preference(const struct ofconn *ofconn)
{
    switch (ofconn->role) {
    case NX_ROLE_MASTER:
        return 3;
    case NX_ROLE_OTHER:
        return 2;
    case NX_ROLE_SLAVE:
        return 1;
    default:
        /* Shouldn't happen. */
        return 0;
    }
}

/* One of ofproto's "snoop" pvconns has accepted a new connection on 'vconn'.
 * Connects this vconn to a controller. */
static void
add_snooper(struct ofproto *ofproto, struct vconn *vconn)
{
    struct ofconn *ofconn, *best;

    /* Pick a controller for monitoring. */
    best = NULL;
    LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
        if (ofconn->type == OFCONN_PRIMARY
            && (!best || snoop_preference(ofconn) > snoop_preference(best))) {
            best = ofconn;
        }
    }

    if (best) {
        rconn_add_monitor(best->rconn, vconn);
    } else {
        VLOG_INFO_RL(&rl, "no controller connection to snoop");
        vconn_close(vconn);
    }
}

static int
ofproto_run1(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    struct ofservice *ofservice;
    char *devname;
    int error;
    int i;

    if (shash_is_empty(&p->port_by_name)) {
        init_ports(p);
    }

    for (i = 0; i < 50; i++) {
        struct ofpbuf *buf;

        error = dpif_recv(p->dpif, &buf);
        if (error) {
            if (error == ENODEV) {
                /* Someone destroyed the datapath behind our back.  The caller
                 * better destroy us and give up, because we're just going to
                 * spin from here on out. */
                static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_ERR_RL(&rl2, "%s: datapath was destroyed externally",
                            dpif_name(p->dpif));
                return ENODEV;
            }
            break;
        }

        handle_odp_msg(p, buf);
    }

    while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }
    while ((error = netdev_monitor_poll(p->netdev_monitor,
                                        &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }

    if (p->in_band) {
        if (time_msec() >= p->next_in_band_update) {
            update_in_band_remotes(p);
        }
        in_band_run(p->in_band);
    }

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &p->all_conns) {
        ofconn_run(ofconn);
    }

    /* Fail-open maintenance.  Do this after processing the ofconns since
     * fail-open checks the status of the controller rconn. */
    if (p->fail_open) {
        fail_open_run(p->fail_open);
    }

    HMAP_FOR_EACH (ofservice, node, &p->services) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(ofservice->pvconn, OFP_VERSION, &vconn);
        if (!retval) {
            struct rconn *rconn;
            char *name;

            rconn = rconn_create(ofservice->probe_interval, 0);
            name = ofconn_make_name(p, vconn_get_name(vconn));
            rconn_connect_unreliably(rconn, vconn, name);
            free(name);

            ofconn = ofconn_create(p, rconn, OFCONN_SERVICE);
            ofconn_set_rate_limit(ofconn, ofservice->rate_limit,
                                  ofservice->burst_limit);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    for (i = 0; i < p->n_snoops; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn);
        if (!retval) {
            add_snooper(p, vconn);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    if (time_msec() >= p->next_expiration) {
        int delay = ofproto_expire(p);
        p->next_expiration = time_msec() + delay;
        COVERAGE_INC(ofproto_expiration);
    }

    if (p->netflow) {
        netflow_run(p->netflow);
    }
    if (p->sflow) {
        ofproto_sflow_run(p->sflow);
    }

    return 0;
}

int
ofproto_run2(struct ofproto *p, bool revalidate_all)
{
    /* Figure out what we need to revalidate now, if anything. */
    struct tag_set revalidate_set = p->revalidate_set;
    if (p->need_revalidate) {
        revalidate_all = true;
    }

    /* Clear the revalidation flags. */
    tag_set_init(&p->revalidate_set);
    p->need_revalidate = false;

    /* Now revalidate if there's anything to do. */
    if (revalidate_all || !tag_set_is_empty(&revalidate_set)) {
        struct facet *facet, *next;

        HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &p->facets) {
            if (revalidate_all
                || tag_set_intersects(&revalidate_set, facet->tags)) {
                facet_revalidate(p, facet);
            }
        }
    }

    return 0;
}
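
/* Illustrative note (not from the original file): this is how tag-based
 * revalidation is driven.  A hook that attached 'tag' to some facets (via the
 * 'tags' argument to xlate_actions()) can call ofproto_revalidate(ofproto,
 * tag) when its state changes; only facets whose 'tags' intersect the
 * accumulated revalidate_set are re-translated on the next ofproto_run2(),
 * rather than the whole facet table. */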

void
ofproto_wait(struct ofproto *p)
{
    struct ofservice *ofservice;
    struct ofconn *ofconn;
    size_t i;

    dpif_recv_wait(p->dpif);
    dpif_port_poll_wait(p->dpif);
    netdev_monitor_poll_wait(p->netdev_monitor);
    LIST_FOR_EACH (ofconn, node, &p->all_conns) {
        ofconn_wait(ofconn);
    }
    if (p->in_band) {
        poll_timer_wait_until(p->next_in_band_update);
        in_band_wait(p->in_band);
    }
    if (p->fail_open) {
        fail_open_wait(p->fail_open);
    }
    if (p->sflow) {
        ofproto_sflow_wait(p->sflow);
    }
    if (!tag_set_is_empty(&p->revalidate_set)) {
        poll_immediate_wake();
    }
    if (p->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else if (p->next_expiration != LLONG_MAX) {
        poll_timer_wait_until(p->next_expiration);
    }
    HMAP_FOR_EACH (ofservice, node, &p->services) {
        pvconn_wait(ofservice->pvconn);
    }
    for (i = 0; i < p->n_snoops; i++) {
        pvconn_wait(p->snoops[i]);
    }
}

void
ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
{
    tag_set_add(&ofproto->revalidate_set, tag);
}

struct tag_set *
ofproto_get_revalidate_set(struct ofproto *ofproto)
{
    return &ofproto->revalidate_set;
}

bool
ofproto_is_alive(const struct ofproto *p)
{
    return !hmap_is_empty(&p->controllers);
}

/* Deletes port number 'odp_port' from the datapath for 'ofproto'.
 *
 * This is almost the same as calling dpif_port_del() directly on the
 * datapath, but it also makes 'ofproto' close its open netdev for the port
 * (if any).  This makes it possible to create a new netdev of a different
 * type under the same name, which otherwise the netdev library would refuse
 * to do because of the conflict.  (The netdev would eventually get closed on
 * the next trip through ofproto_run(), but this interface is more direct.)
 *
 * Returns 0 if successful, otherwise a positive errno. */
int
ofproto_port_del(struct ofproto *ofproto, uint16_t odp_port)
{
    struct ofport *ofport = get_port(ofproto, odp_port);
    const char *name = ofport ? (char *) ofport->opp.name : "<unknown>";
    int error;

    error = dpif_port_del(ofproto->dpif, odp_port);
    if (error) {
        VLOG_ERR("%s: failed to remove port %"PRIu16" (%s) interface (%s)",
                 dpif_name(ofproto->dpif), odp_port, name, strerror(error));
    } else if (ofport) {
        /* 'name' is ofport->opp.name and update_port() is going to destroy
         * 'ofport'.  Just in case update_port() refers to 'name' after it
         * destroys 'ofport', make a copy of it around the update_port()
         * call. */
        char *devname = xstrdup(name);
        update_port(ofproto, devname);
        free(devname);
    }

    return error;
}
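
/* Illustrative usage sketch (not from the original file; the port number is
 * an assumption):
 *
 *     int error = ofproto_port_del(ofproto, 5);
 *     if (error) {
 *         VLOG_WARN("failed to delete port 5: %s", strerror(error));
 *     }
 */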

/* Checks if 'ofproto' thinks 'odp_port' should be included in floods.  Returns
 * true if 'odp_port' exists and should be included, false otherwise. */
bool
ofproto_port_is_floodable(struct ofproto *ofproto, uint16_t odp_port)
{
    struct ofport *ofport = get_port(ofproto, odp_port);
    return ofport && !(ofport->opp.config & OFPPC_NO_FLOOD);
}

int
ofproto_send_packet(struct ofproto *p, const struct flow *flow,
                    const union ofp_action *actions, size_t n_actions,
                    const struct ofpbuf *packet)
{
    struct odp_actions odp_actions;
    int error;

    error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
                          NULL, NULL, NULL);
    if (error) {
        return error;
    }

    /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
     * error code? */
    dpif_execute(p->dpif, odp_actions.actions, odp_actions.n_actions, packet);
    return 0;
}

/* Adds a flow to the OpenFlow flow table in 'p' that matches 'cls_rule' and
 * performs the 'n_actions' actions in 'actions'.  The new flow will not
 * time out.
 *
 * If cls_rule->priority is in the range of priorities supported by OpenFlow
 * (0...65535, inclusive) then the flow will be visible to OpenFlow
 * controllers; otherwise, it will be hidden.
 *
 * The caller retains ownership of 'cls_rule' and 'actions'. */
void
ofproto_add_flow(struct ofproto *p, const struct cls_rule *cls_rule,
                 const union ofp_action *actions, size_t n_actions)
{
    struct rule *rule;
    rule = rule_create(cls_rule, actions, n_actions, 0, 0, 0, false);
    rule_insert(p, rule);
}
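
/* Illustrative usage sketch (not from the original file;
 * cls_rule_init_catchall() is an assumed classifier helper, and the priority
 * deliberately falls in the hidden range described above):
 *
 *     struct cls_rule cls_rule;
 *     union ofp_action action;
 *
 *     memset(&action, 0, sizeof action);
 *     action.output.type = htons(OFPAT_OUTPUT);
 *     action.output.len = htons(sizeof action.output);
 *     action.output.port = htons(OFPP_CONTROLLER);
 *     action.output.max_len = htons(128);
 *
 *     cls_rule_init_catchall(&cls_rule, UINT16_MAX + 1);
 *     ofproto_add_flow(p, &cls_rule, &action, 1);
 */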

void
ofproto_delete_flow(struct ofproto *ofproto, const struct cls_rule *target)
{
    struct rule *rule;

    rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
                                                           target));
    if (rule) {
        rule_remove(ofproto, rule);
    }
}

static void
destroy_rule(struct cls_rule *rule_, void *ofproto_)
{
    struct rule *rule = rule_from_cls_rule(rule_);
    struct ofproto *ofproto = ofproto_;

    rule_remove(ofproto, rule);
}

void
ofproto_flush_flows(struct ofproto *ofproto)
{
    struct facet *facet, *next_facet;

    COVERAGE_INC(ofproto_flush);

    HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
        /* Mark the facet as not installed so that facet_remove() doesn't
         * bother trying to uninstall it.  There is no point in uninstalling it
         * individually since we are about to blow away all the facets with
         * dpif_flow_flush(). */
        facet->installed = false;
        facet_remove(ofproto, facet);
    }
    classifier_for_each(&ofproto->cls, CLS_INC_ALL, destroy_rule, ofproto);
    dpif_flow_flush(ofproto->dpif);
    if (ofproto->in_band) {
        in_band_flushed(ofproto->in_band);
    }
    if (ofproto->fail_open) {
        fail_open_flushed(ofproto->fail_open);
    }
}

static void
reinit_ports(struct ofproto *p)
{
    struct svec devnames;
    struct ofport *ofport;
    struct odp_port *odp_ports;
    size_t n_odp_ports;
    size_t i;

    COVERAGE_INC(ofproto_reinit_ports);

    svec_init(&devnames);
    HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
        svec_add (&devnames, (char *) ofport->opp.name);
    }
    dpif_port_list(p->dpif, &odp_ports, &n_odp_ports);
    for (i = 0; i < n_odp_ports; i++) {
        svec_add (&devnames, odp_ports[i].devname);
    }
    free(odp_ports);

    svec_sort_unique(&devnames);
    for (i = 0; i < devnames.n; i++) {
        update_port(p, devnames.names[i]);
    }
    svec_destroy(&devnames);
}

static struct ofport *
make_ofport(const struct odp_port *odp_port)
{
    struct netdev_options netdev_options;
    enum netdev_flags flags;
    struct ofport *ofport;
    struct netdev *netdev;
    int error;

    memset(&netdev_options, 0, sizeof netdev_options);
    netdev_options.name = odp_port->devname;
    netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;

    error = netdev_open(&netdev_options, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
                     "cannot be opened (%s)",
                     odp_port->devname, odp_port->port,
                     odp_port->devname, strerror(error));
        return NULL;
    }

    ofport = xmalloc(sizeof *ofport);
    ofport->netdev = netdev;
    ofport->odp_port = odp_port->port;
    ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port);
    netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
    memcpy(ofport->opp.name, odp_port->devname,
           MIN(sizeof ofport->opp.name, sizeof odp_port->devname));
    ofport->opp.name[sizeof ofport->opp.name - 1] = '\0';

    netdev_get_flags(netdev, &flags);
    ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;

    ofport->opp.state = netdev_get_carrier(netdev) ? 0 : OFPPS_LINK_DOWN;

    netdev_get_features(netdev,
                        &ofport->opp.curr, &ofport->opp.advertised,
                        &ofport->opp.supported, &ofport->opp.peer);
    return ofport;
}

static bool
ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port)
{
    if (get_port(p, odp_port->port)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
                     odp_port->port);
        return true;
    } else if (shash_find(&p->port_by_name, odp_port->devname)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
                     odp_port->devname);
        return true;
    } else {
        return false;
    }
}

static bool
ofport_equal(const struct ofport *a_, const struct ofport *b_)
{
    const struct ofp_phy_port *a = &a_->opp;
    const struct ofp_phy_port *b = &b_->opp;

    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
    return (a->port_no == b->port_no
            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
            && !strcmp((char *) a->name, (char *) b->name)
            && a->state == b->state
            && a->config == b->config
            && a->curr == b->curr
            && a->advertised == b->advertised
            && a->supported == b->supported
            && a->peer == b->peer);
}

static void
send_port_status(struct ofproto *p, const struct ofport *ofport,
                 uint8_t reason)
{
    /* XXX Should limit the number of queued port status change messages. */
    struct ofconn *ofconn;
    LIST_FOR_EACH (ofconn, node, &p->all_conns) {
        struct ofp_port_status *ops;
        struct ofpbuf *b;

        if (!ofconn_receives_async_msgs(ofconn)) {
            continue;
        }

        ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
        ops->reason = reason;
        ops->desc = ofport->opp;
        hton_ofp_phy_port(&ops->desc);
        queue_tx(b, ofconn, NULL);
    }
}

static void
ofport_install(struct ofproto *p, struct ofport *ofport)
{
    const char *netdev_name = (const char *) ofport->opp.name;

    netdev_monitor_add(p->netdev_monitor, ofport->netdev);
    hmap_insert(&p->ports, &ofport->hmap_node, hash_int(ofport->odp_port, 0));
    shash_add(&p->port_by_name, netdev_name, ofport);
    if (p->sflow) {
        ofproto_sflow_add_port(p->sflow, ofport->odp_port, netdev_name);
    }
}

static void
ofport_remove(struct ofproto *p, struct ofport *ofport)
{
    netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
    hmap_remove(&p->ports, &ofport->hmap_node);
    shash_delete(&p->port_by_name,
                 shash_find(&p->port_by_name, (char *) ofport->opp.name));
    if (p->sflow) {
        ofproto_sflow_del_port(p->sflow, ofport->odp_port);
    }
}

static void
ofport_free(struct ofport *ofport)
{
    if (ofport) {
        netdev_close(ofport->netdev);
        free(ofport);
    }
}

static struct ofport *
get_port(const struct ofproto *ofproto, uint16_t odp_port)
{
    struct ofport *port;

    HMAP_FOR_EACH_IN_BUCKET (port, hmap_node,
                             hash_int(odp_port, 0), &ofproto->ports) {
        if (port->odp_port == odp_port) {
            return port;
        }
    }
    return NULL;
}

static void
update_port(struct ofproto *p, const char *devname)
{
    struct odp_port odp_port;
    struct ofport *old_ofport;
    struct ofport *new_ofport;
    int error;

    COVERAGE_INC(ofproto_update_port);

    /* Query the datapath for port information. */
    error = dpif_port_query_by_name(p->dpif, devname, &odp_port);

    /* Find the old ofport. */
    old_ofport = shash_find_data(&p->port_by_name, devname);
    if (!error) {
        if (!old_ofport) {
            /* There's no port named 'devname' but there might be a port with
             * the same port number.  This could happen if a port is deleted
             * and then a new one added in its place very quickly, or if a port
             * is renamed.  In the former case we want to send an OFPPR_DELETE
             * and an OFPPR_ADD, and in the latter case we want to send a
             * single OFPPR_MODIFY.  We can distinguish the cases by comparing
             * the old port's ifindex against the new port, or perhaps less
             * reliably but more portably by comparing the old port's MAC
             * against the new port's MAC.  However, this code isn't that smart
             * and always sends an OFPPR_MODIFY (XXX). */
            old_ofport = get_port(p, odp_port.port);
        }
    } else if (error != ENOENT && error != ENODEV) {
        VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error "
                     "%s", strerror(error));
        return;
    }

    /* Create a new ofport. */
    new_ofport = !error ? make_ofport(&odp_port) : NULL;

    /* Eliminate a few pathological cases. */
    if (!old_ofport && !new_ofport) {
        return;
    } else if (old_ofport && new_ofport) {
        /* Most of the 'config' bits are OpenFlow soft state, but
         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
         * OpenFlow bits from old_ofport.  (make_ofport() only sets
         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
        new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;

        if (ofport_equal(old_ofport, new_ofport)) {
            /* False alarm--no change. */
            ofport_free(new_ofport);
            return;
        }
    }

    /* Now deal with the normal cases. */
    if (old_ofport) {
        ofport_remove(p, old_ofport);
    }
    if (new_ofport) {
        ofport_install(p, new_ofport);
    }
    send_port_status(p, new_ofport ? new_ofport : old_ofport,
                     (!old_ofport ? OFPPR_ADD
                      : !new_ofport ? OFPPR_DELETE
                      : OFPPR_MODIFY));
    ofport_free(old_ofport);
}

static int
init_ports(struct ofproto *p)
{
    struct odp_port *ports;
    size_t n_ports;
    size_t i;
    int error;

    error = dpif_port_list(p->dpif, &ports, &n_ports);
    if (error) {
        return error;
    }

    for (i = 0; i < n_ports; i++) {
        const struct odp_port *odp_port = &ports[i];
        if (!ofport_conflicts(p, odp_port)) {
            struct ofport *ofport = make_ofport(odp_port);
            if (ofport) {
                ofport_install(p, ofport);
            }
        }
    }
    free(ports);
    return 0;
}

static struct ofconn *
ofconn_create(struct ofproto *p, struct rconn *rconn, enum ofconn_type type)
{
    struct ofconn *ofconn = xzalloc(sizeof *ofconn);
    ofconn->ofproto = p;
    list_push_back(&p->all_conns, &ofconn->node);
    ofconn->rconn = rconn;
    ofconn->type = type;
    ofconn->flow_format = NXFF_OPENFLOW10;
    ofconn->role = NX_ROLE_OTHER;
    ofconn->packet_in_counter = rconn_packet_counter_create ();
    ofconn->pktbuf = NULL;
    ofconn->miss_send_len = 0;
    ofconn->reply_counter = rconn_packet_counter_create ();
    return ofconn;
}

static void
ofconn_destroy(struct ofconn *ofconn)
{
    if (ofconn->type == OFCONN_PRIMARY) {
        hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node);
    }
    discovery_destroy(ofconn->discovery);

    list_remove(&ofconn->node);
    switch_status_unregister(ofconn->ss);
    rconn_destroy(ofconn->rconn);
    rconn_packet_counter_destroy(ofconn->packet_in_counter);
    rconn_packet_counter_destroy(ofconn->reply_counter);
    pktbuf_destroy(ofconn->pktbuf);
    free(ofconn);
}

static void
ofconn_run(struct ofconn *ofconn)
{
    struct ofproto *p = ofconn->ofproto;
    int iteration;
    size_t i;

    if (ofconn->discovery) {
        char *controller_name;
        if (rconn_is_connectivity_questionable(ofconn->rconn)) {
            discovery_question_connectivity(ofconn->discovery);
        }
        if (discovery_run(ofconn->discovery, &controller_name)) {
            if (controller_name) {
                char *ofconn_name = ofconn_make_name(p, controller_name);
                rconn_connect(ofconn->rconn, controller_name, ofconn_name);
                free(ofconn_name);
            } else {
                rconn_disconnect(ofconn->rconn);
            }
        }
    }

    for (i = 0; i < N_SCHEDULERS; i++) {
        pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn);
    }

    rconn_run(ofconn->rconn);

    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        /* Limit the number of iterations to prevent other tasks from
         * starving. */
        for (iteration = 0; iteration < 50; iteration++) {
            struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
            if (!of_msg) {
                break;
            }
            if (p->fail_open) {
                fail_open_maybe_recover(p->fail_open);
            }
            handle_openflow(ofconn, of_msg);
            ofpbuf_delete(of_msg);
        }
    }

    if (!ofconn->discovery && !rconn_is_alive(ofconn->rconn)) {
        ofconn_destroy(ofconn);
    }
}

static void
ofconn_wait(struct ofconn *ofconn)
{
    int i;

    if (ofconn->discovery) {
        discovery_wait(ofconn->discovery);
    }
    for (i = 0; i < N_SCHEDULERS; i++) {
        pinsched_wait(ofconn->schedulers[i]);
    }
    rconn_run_wait(ofconn->rconn);
    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        rconn_recv_wait(ofconn->rconn);
    } else {
        COVERAGE_INC(ofproto_ofconn_stuck);
    }
}

/* Returns true if 'ofconn' should receive asynchronous messages. */
static bool
ofconn_receives_async_msgs(const struct ofconn *ofconn)
{
    if (ofconn->type == OFCONN_PRIMARY) {
        /* Primary controllers always get asynchronous messages unless they
         * have configured themselves as "slaves". */
        return ofconn->role != NX_ROLE_SLAVE;
    } else {
        /* Service connections don't get asynchronous messages unless they have
         * explicitly asked for them by setting a nonzero miss send length. */
        return ofconn->miss_send_len > 0;
    }
}

/* Returns a human-readable name for an OpenFlow connection between 'ofproto'
 * and 'target', suitable for use in log messages for identifying the
 * connection.
 *
 * The name is dynamically allocated.  The caller should free it (with free())
 * when it is no longer needed. */
static char *
ofconn_make_name(const struct ofproto *ofproto, const char *target)
{
    return xasprintf("%s<->%s", dpif_base_name(ofproto->dpif), target);
}
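
/* Illustrative usage sketch (not from the original file; the target string is
 * an assumption):
 *
 *     char *name = ofconn_make_name(ofproto, "tcp:192.168.1.2:6633");
 *     VLOG_INFO("new connection %s", name);
 *     free(name);
 */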

static void
ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst)
{
    int i;

    for (i = 0; i < N_SCHEDULERS; i++) {
        struct pinsched **s = &ofconn->schedulers[i];

        if (rate > 0) {
            if (!*s) {
                *s = pinsched_create(rate, burst,
                                     ofconn->ofproto->switch_status);
            } else {
                pinsched_set_limits(*s, rate, burst);
            }
        } else {
            pinsched_destroy(*s);
            *s = NULL;
        }
    }
}

static void
ofservice_reconfigure(struct ofservice *ofservice,
                      const struct ofproto_controller *c)
{
    ofservice->probe_interval = c->probe_interval;
    ofservice->rate_limit = c->rate_limit;
    ofservice->burst_limit = c->burst_limit;
}

/* Creates a new ofservice in 'ofproto'.  Returns 0 if successful, otherwise a
 * positive errno value. */
static int
ofservice_create(struct ofproto *ofproto, const struct ofproto_controller *c)
{
    struct ofservice *ofservice;
    struct pvconn *pvconn;
    int error;

    error = pvconn_open(c->target, &pvconn);
    if (error) {
        return error;
    }

    ofservice = xzalloc(sizeof *ofservice);
    hmap_insert(&ofproto->services, &ofservice->node,
                hash_string(c->target, 0));
    ofservice->pvconn = pvconn;

    ofservice_reconfigure(ofservice, c);

    return 0;
}

static void
ofservice_destroy(struct ofproto *ofproto, struct ofservice *ofservice)
{
    hmap_remove(&ofproto->services, &ofservice->node);
    pvconn_close(ofservice->pvconn);
    free(ofservice);
}

/* Finds and returns the ofservice within 'ofproto' that has the given
 * 'target', or a null pointer if none exists. */
static struct ofservice *
ofservice_lookup(struct ofproto *ofproto, const char *target)
{
    struct ofservice *ofservice;

    HMAP_FOR_EACH_WITH_HASH (ofservice, node, hash_string(target, 0),
                             &ofproto->services) {
        if (!strcmp(pvconn_get_name(ofservice->pvconn), target)) {
            return ofservice;
        }
    }
    return NULL;
}

/* Returns true if 'rule' should be hidden from the controller.
 *
 * Rules with priority higher than UINT16_MAX are set up by ofproto itself
 * (e.g. by in-band control) and are intentionally hidden from the
 * controller. */
static bool
rule_is_hidden(const struct rule *rule)
{
    return rule->cr.priority > UINT16_MAX;
}

/* Creates and returns a new rule initialized as specified.
 *
 * The caller is responsible for inserting the rule into the classifier (with
 * rule_insert()). */
static struct rule *
rule_create(const struct cls_rule *cls_rule,
            const union ofp_action *actions, size_t n_actions,
            uint16_t idle_timeout, uint16_t hard_timeout,
            ovs_be64 flow_cookie, bool send_flow_removed)
{
    struct rule *rule = xzalloc(sizeof *rule);
    rule->cr = *cls_rule;
    rule->idle_timeout = idle_timeout;
    rule->hard_timeout = hard_timeout;
    rule->flow_cookie = flow_cookie;
    rule->used = rule->created = time_msec();
    rule->send_flow_removed = send_flow_removed;
    list_init(&rule->facets);
    if (n_actions > 0) {
        rule->n_actions = n_actions;
        rule->actions = xmemdup(actions, n_actions * sizeof *actions);
    }

    return rule;
}

static struct rule *
rule_from_cls_rule(const struct cls_rule *cls_rule)
{
    return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
}

static void
rule_free(struct rule *rule)
{
    free(rule->actions);
    free(rule);
}

/* Destroys 'rule' and iterates through all of its facets and revalidates them,
 * destroying any that no longer has a rule (which is probably all of them).
 *
 * The caller must have already removed 'rule' from the classifier. */
static void
rule_destroy(struct ofproto *ofproto, struct rule *rule)
{
    struct facet *facet, *next_facet;
    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_revalidate(ofproto, facet);
    }
    rule_free(rule);
}

/* Returns true if 'rule' has an OpenFlow OFPAT_OUTPUT or OFPAT_ENQUEUE action
 * that outputs to 'out_port' (output to OFPP_FLOOD and OFPP_ALL doesn't
 * count). */
static bool
rule_has_out_port(const struct rule *rule, ovs_be16 out_port)
{
    const union ofp_action *oa;
    struct actions_iterator i;

    if (out_port == htons(OFPP_NONE)) {
        return true;
    }
    for (oa = actions_first(&i, rule->actions, rule->n_actions); oa;
         oa = actions_next(&i)) {
        if (action_outputs_to_port(oa, out_port)) {
            return true;
        }
    }
    return false;
}
1953 * 'packet', which arrived on 'in_port'.
1955 * Takes ownership of 'packet'. */
1957 execute_odp_actions(struct ofproto *ofproto, uint16_t in_port,
1958 const union odp_action *actions, size_t n_actions,
1959 struct ofpbuf *packet)
1961 if (n_actions == 1 && actions[0].type == ODPAT_CONTROLLER) {
1962 /* As an optimization, avoid a round-trip from userspace to kernel to
1963 * userspace. This also avoids possibly filling up kernel packet
1964 * buffers along the way. */
1965 struct odp_msg *msg;
1967 msg = ofpbuf_push_uninit(packet, sizeof *msg);
1968 msg->type = _ODPL_ACTION_NR;
1969 msg->length = sizeof(struct odp_msg) + packet->size;
1970 msg->port = in_port;
1972 msg->arg = actions[0].controller.arg;
1974 send_packet_in(ofproto, packet);
1980 error = dpif_execute(ofproto->dpif, actions, n_actions, packet);
1981 ofpbuf_delete(packet);

/* Executes the actions indicated by 'facet' on 'packet' and credits 'facet''s
 * statistics appropriately.  'packet' must have at least sizeof(struct
 * ofp_packet_in) bytes of headroom.
 *
 * For correct results, 'packet' must actually be in 'facet''s flow; that is,
 * applying flow_extract() to 'packet' would yield the same flow as
 * 'facet->flow'.
 *
 * 'facet' must have accurately composed ODP actions; that is, it must not be
 * in need of revalidation.
 *
 * Takes ownership of 'packet'. */
static void
facet_execute(struct ofproto *ofproto, struct facet *facet,
              struct ofpbuf *packet)
{
    struct odp_flow_stats stats;

    assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));

    flow_extract_stats(&facet->flow, packet, &stats);
    if (execute_odp_actions(ofproto, facet->flow.in_port,
                            facet->actions, facet->n_actions, packet)) {
        facet_update_stats(ofproto, facet, &stats);
        facet->used = time_msec();
        netflow_flow_update_time(ofproto->netflow,
                                 &facet->nf_flow, facet->used);
    }
}

/* Executes the actions indicated by 'rule' on 'packet' and credits 'rule''s
 * statistics (or the statistics for one of its facets) appropriately.
 * 'packet' must have at least sizeof(struct ofp_packet_in) bytes of headroom.
 *
 * 'packet' doesn't necessarily have to match 'rule'.  'rule' will be credited
 * with statistics for 'packet' either way.
 *
 * Takes ownership of 'packet'. */
static void
rule_execute(struct ofproto *ofproto, struct rule *rule, uint16_t in_port,
             struct ofpbuf *packet)
{
    struct facet *facet;
    struct odp_actions a;
    struct flow flow;
    size_t size;

    assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));

    flow_extract(packet, 0, in_port, &flow);

    /* First look for a related facet.  If we find one, account it to that. */
    facet = facet_lookup_valid(ofproto, &flow);
    if (facet && facet->rule == rule) {
        facet_execute(ofproto, facet, packet);
        return;
    }

    /* Otherwise, if 'rule' is in fact the correct rule for 'packet', then
     * create a new facet for it and use that. */
    if (rule_lookup(ofproto, &flow) == rule) {
        facet = facet_create(ofproto, rule, &flow, packet);
        facet_execute(ofproto, facet, packet);
        facet_install(ofproto, facet, true);
        return;
    }

    /* We can't account anything to a facet.  If we were to try, then that
     * facet would have a non-matching rule, busting our invariants. */
    if (xlate_actions(rule->actions, rule->n_actions, &flow, ofproto,
                      packet, &a, NULL, 0, NULL)) {
        ofpbuf_delete(packet);
        return;
    }
    size = packet->size;
    if (execute_odp_actions(ofproto, in_port,
                            a.actions, a.n_actions, packet)) {
        rule->used = time_msec();
        rule->packet_count++;
        rule->byte_count += size;
    }
}

/* Inserts 'rule' into 'p''s flow table. */
static void
rule_insert(struct ofproto *p, struct rule *rule)
{
    struct rule *displaced_rule;

    displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
    if (displaced_rule) {
        rule_destroy(p, displaced_rule);
    }
    p->need_revalidate = true;
}

/* Creates and returns a new facet within 'ofproto' owned by 'rule', given a
 * 'flow' and an example 'packet' within that flow.
 *
 * The caller must already have determined that no facet with an identical
 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
 * 'ofproto''s classifier table. */
static struct facet *
facet_create(struct ofproto *ofproto, struct rule *rule,
             const struct flow *flow, const struct ofpbuf *packet)
{
    struct facet *facet;

    facet = xzalloc(sizeof *facet);
    facet->used = time_msec();
    hmap_insert(&ofproto->facets, &facet->hmap_node, flow_hash(flow, 0));
    list_push_back(&rule->facets, &facet->list_node);
    facet->rule = rule;
    facet->flow = *flow;
    netflow_flow_init(&facet->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);

    facet_make_actions(ofproto, facet, packet);

    return facet;
}

static void
facet_free(struct facet *facet)
{
    free(facet->actions);
    free(facet);
}

/* Remove 'rule' from 'ofproto' and free up the associated memory:
 *
 *   - Removes 'rule' from the classifier.
 *
 *   - If 'rule' has facets, revalidates them (and possibly uninstalls and
 *     destroys them), via rule_destroy().
 */
static void
rule_remove(struct ofproto *ofproto, struct rule *rule)
{
    COVERAGE_INC(ofproto_del_rule);
    ofproto->need_revalidate = true;
    classifier_remove(&ofproto->cls, &rule->cr);
    rule_destroy(ofproto, rule);
}

/* Remove 'facet' from 'ofproto' and free up the associated memory:
 *
 *   - If 'facet' was installed in the datapath, uninstalls it and updates its
 *     rule's statistics, via facet_uninstall().
 *
 *   - Removes 'facet' from its rule and from ofproto->facets.
 */
static void
facet_remove(struct ofproto *ofproto, struct facet *facet)
{
    facet_uninstall(ofproto, facet);
    facet_flush_stats(ofproto, facet);
    hmap_remove(&ofproto->facets, &facet->hmap_node);
    list_remove(&facet->list_node);
    facet_free(facet);
}
2148 /* Composes the ODP actions for 'facet' based on its rule's actions. */
2150 facet_make_actions(struct ofproto *p, struct facet *facet,
2151 const struct ofpbuf *packet)
2153 const struct rule *rule = facet->rule;
2154 struct odp_actions a;
2157 xlate_actions(rule->actions, rule->n_actions, &facet->flow, p,
2158 packet, &a, &facet->tags, &facet->may_install,
2159 &facet->nf_flow.output_iface);
2161 actions_len = a.n_actions * sizeof *a.actions;
2162 if (facet->n_actions != a.n_actions
2163 || memcmp(facet->actions, a.actions, actions_len)) {
2164 free(facet->actions);
2165 facet->n_actions = a.n_actions;
2166 facet->actions = xmemdup(a.actions, actions_len);
2171 facet_put__(struct ofproto *ofproto, struct facet *facet, int flags,
2172 struct odp_flow_put *put)
2174 memset(&put->flow.stats, 0, sizeof put->flow.stats);
2175 odp_flow_key_from_flow(&put->flow.key, &facet->flow);
2176 put->flow.actions = facet->actions;
2177 put->flow.n_actions = facet->n_actions;
2178 put->flow.flags = 0;
2179 put->flags = flags;
2180 return dpif_flow_put(ofproto->dpif, put);
2181 }
2183 /* If 'facet' is installable, inserts or re-inserts it into 'p''s datapath. If
2184 * 'zero_stats' is true, clears any existing statistics from the datapath for
2185 * 'facet'. */
2187 facet_install(struct ofproto *p, struct facet *facet, bool zero_stats)
2189 if (facet->may_install) {
2190 struct odp_flow_put put;
2191 int flags;
2193 flags = ODPPF_CREATE | ODPPF_MODIFY;
2194 if (zero_stats) {
2195 flags |= ODPPF_ZERO_STATS;
2196 }
2197 if (!facet_put__(p, facet, flags, &put)) {
2198 facet->installed = true;
2203 /* Ensures that the bytes in 'facet', plus 'extra_bytes', have been passed up
2204 * to the accounting hook function in the ofhooks structure. */
2206 facet_account(struct ofproto *ofproto,
2207 struct facet *facet, uint64_t extra_bytes)
2209 uint64_t total_bytes = facet->byte_count + extra_bytes;
2211 if (ofproto->ofhooks->account_flow_cb
2212 && total_bytes > facet->accounted_bytes)
2213 {
2214 ofproto->ofhooks->account_flow_cb(
2215 &facet->flow, facet->tags, facet->actions, facet->n_actions,
2216 total_bytes - facet->accounted_bytes, ofproto->aux);
2217 facet->accounted_bytes = total_bytes;
2218 }
2219 }
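/* Example (added annotation, not part of the original source): a hypothetical
 * account_flow_cb implementation, to illustrate the contract above: the hook
 * is handed only the bytes not yet accounted ('total_bytes' minus
 * 'accounted_bytes'), so a simple hook can just accumulate.  'struct my_stats'
 * is an assumption for the sketch:
 *
 *     static void
 *     my_account_flow_cb(const struct flow *flow, tag_type tags,
 *                        const union odp_action *actions, size_t n_actions,
 *                        uint64_t n_bytes, void *aux)
 *     {
 *         struct my_stats *stats = aux;
 *         stats->bytes += n_bytes;    // never double counts a byte
 *     }
 */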
2221 /* If 'facet' is installed in the datapath, uninstalls it. */
2223 facet_uninstall(struct ofproto *p, struct facet *facet)
2225 if (facet->installed) {
2226 struct odp_flow odp_flow;
2228 odp_flow_key_from_flow(&odp_flow.key, &facet->flow);
2229 odp_flow.actions = NULL;
2230 odp_flow.n_actions = 0;
2231 odp_flow.flags = 0;
2232 if (!dpif_flow_del(p->dpif, &odp_flow)) {
2233 facet_update_stats(p, facet, &odp_flow.stats);
2235 facet->installed = false;
2239 /* Returns true if the only action for 'facet' is to send to the controller.
2240 * (We don't report NetFlow expiration messages for such facets because they
2241 * are just part of the control logic for the network, not real traffic). */
2243 facet_is_controller_flow(struct facet *facet)
2245 return (facet
2246 && facet->rule->n_actions == 1
2247 && action_outputs_to_port(&facet->rule->actions[0],
2248 htons(OFPP_CONTROLLER)));
2251 /* Folds all of 'facet''s statistics into its rule. Also updates the
2252 * accounting ofhook and emits a NetFlow expiration if appropriate. */
2254 facet_flush_stats(struct ofproto *ofproto, struct facet *facet)
2256 facet_account(ofproto, facet, 0);
2258 if (ofproto->netflow && !facet_is_controller_flow(facet)) {
2259 struct ofexpired expired;
2260 expired.flow = facet->flow;
2261 expired.packet_count = facet->packet_count;
2262 expired.byte_count = facet->byte_count;
2263 expired.used = facet->used;
2264 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
2267 facet->rule->packet_count += facet->packet_count;
2268 facet->rule->byte_count += facet->byte_count;
2270 /* Reset counters to prevent double counting if 'facet' ever gets
2271 * reinstalled. */
2272 facet->packet_count = 0;
2273 facet->byte_count = 0;
2274 facet->accounted_bytes = 0;
2276 netflow_flow_clear(&facet->nf_flow);
2279 /* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
2280 * Returns it if found, otherwise a null pointer.
2282 * The returned facet might need revalidation; use facet_lookup_valid()
2283 * instead if that is important. */
2284 static struct facet *
2285 facet_find(struct ofproto *ofproto, const struct flow *flow)
2287 struct facet *facet;
2289 HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, flow_hash(flow, 0),
2290 &ofproto->facets) {
2291 if (flow_equal(flow, &facet->flow)) {
2292 return facet;
2299 /* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
2300 * Returns it if found, otherwise a null pointer.
2302 * The returned facet is guaranteed to be valid. */
2303 static struct facet *
2304 facet_lookup_valid(struct ofproto *ofproto, const struct flow *flow)
2306 struct facet *facet = facet_find(ofproto, flow);
2308 /* The facet we found might not be valid, since we could be in need of
2309 * revalidation. If it is not valid, don't return it. */
2310 if (facet
2311 && ofproto->need_revalidate
2312 && !facet_revalidate(ofproto, facet)) {
2313 COVERAGE_INC(ofproto_invalidated);
2314 return NULL;
2315 }
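/* Example (added annotation, not part of the original source): callers that
 * are about to use a facet's cached ODP actions should prefer the validating
 * lookup, e.g.:
 *
 *     struct facet *facet = facet_lookup_valid(ofproto, &flow);
 *     if (facet) {
 *         ...facet->actions are now consistent with the classifier...
 *     }
 *
 * facet_find() alone is cheaper but may return a facet whose actions are
 * stale whenever ofproto->need_revalidate is set. */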
2320 /* Re-searches 'ofproto''s classifier for a rule matching 'facet':
2322 * - If the rule found is different from 'facet''s current rule, moves
2323 * 'facet' to the new rule and recompiles its actions.
2325 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
2326 * where it is and recompiles its actions anyway.
2328 * - If there is none, destroys 'facet'.
2330 * Returns true if 'facet' still exists, false if it has been destroyed. */
2332 facet_revalidate(struct ofproto *ofproto, struct facet *facet)
2334 struct rule *new_rule;
2335 struct odp_actions a;
2336 size_t actions_len;
2337 uint16_t new_nf_output_iface;
2338 bool actions_changed;
2340 COVERAGE_INC(facet_revalidate);
2342 /* Determine the new rule. */
2343 new_rule = rule_lookup(ofproto, &facet->flow);
2344 if (!new_rule) {
2345 /* No new rule, so delete the facet. */
2346 facet_remove(ofproto, facet);
2347 return false;
2348 }
2350 /* Calculate new ODP actions.
2352 * We are very cautious about actually modifying 'facet' state at this
2353 * point, because we might need to, e.g., emit a NetFlow expiration and, if
2354 * so, we need to have the old state around to properly compose it. */
2355 xlate_actions(new_rule->actions, new_rule->n_actions, &facet->flow,
2356 ofproto, NULL, &a, &facet->tags, &facet->may_install,
2357 &new_nf_output_iface);
2358 actions_len = a.n_actions * sizeof *a.actions;
2359 actions_changed = (facet->n_actions != a.n_actions
2360 || memcmp(facet->actions, a.actions, actions_len));
2362 /* If the ODP actions changed or the installability changed, then we need
2363 * to talk to the datapath. */
2364 if (actions_changed || facet->may_install != facet->installed) {
2365 if (facet->may_install) {
2366 struct odp_flow_put put;
2368 memset(&put.flow.stats, 0, sizeof put.flow.stats);
2369 odp_flow_key_from_flow(&put.flow.key, &facet->flow);
2370 put.flow.actions = a.actions;
2371 put.flow.n_actions = a.n_actions;
2372 put.flow.flags = 0;
2373 put.flags = ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS;
2374 dpif_flow_put(ofproto->dpif, &put);
2376 facet_update_stats(ofproto, facet, &put.flow.stats);
2377 } else {
2378 facet_uninstall(ofproto, facet);
2379 }
2381 /* The datapath flow is gone or has zeroed stats, so push stats out of
2382 * 'facet' into 'rule'. */
2383 facet_flush_stats(ofproto, facet);
2384 }
2386 /* Update 'facet' now that we've taken care of all the old state. */
2387 facet->nf_flow.output_iface = new_nf_output_iface;
2388 if (actions_changed) {
2389 free(facet->actions);
2390 facet->n_actions = a.n_actions;
2391 facet->actions = xmemdup(a.actions, actions_len);
2393 if (facet->rule != new_rule) {
2394 COVERAGE_INC(facet_changed_rule);
2395 list_remove(&facet->list_node);
2396 list_push_back(&new_rule->facets, &facet->list_node);
2397 facet->rule = new_rule;
2398 facet->used = new_rule->created;
2399 }
2401 return true;
2405 queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
2406 struct rconn_packet_counter *counter)
2408 update_openflow_length(msg);
2409 if (rconn_send(ofconn->rconn, msg, counter)) {
2410 ofpbuf_delete(msg);
2411 }
2415 send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
2416 int error)
2418 struct ofpbuf *buf = make_ofp_error_msg(error, oh);
2420 COVERAGE_INC(ofproto_error);
2421 queue_tx(buf, ofconn, ofconn->reply_counter);
2426 hton_ofp_phy_port(struct ofp_phy_port *opp)
2428 opp->port_no = htons(opp->port_no);
2429 opp->config = htonl(opp->config);
2430 opp->state = htonl(opp->state);
2431 opp->curr = htonl(opp->curr);
2432 opp->advertised = htonl(opp->advertised);
2433 opp->supported = htonl(opp->supported);
2434 opp->peer = htonl(opp->peer);
2438 handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
2440 struct ofp_header *rq = oh;
2441 queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
2446 handle_features_request(struct ofconn *ofconn, struct ofp_header *oh)
2448 struct ofp_switch_features *osf;
2449 struct ofpbuf *buf;
2450 struct ofport *port;
2452 osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
2453 osf->datapath_id = htonll(ofconn->ofproto->datapath_id);
2454 osf->n_buffers = htonl(pktbuf_capacity());
2456 osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
2457 OFPC_PORT_STATS | OFPC_ARP_MATCH_IP);
2458 osf->actions = htonl((1u << OFPAT_OUTPUT) |
2459 (1u << OFPAT_SET_VLAN_VID) |
2460 (1u << OFPAT_SET_VLAN_PCP) |
2461 (1u << OFPAT_STRIP_VLAN) |
2462 (1u << OFPAT_SET_DL_SRC) |
2463 (1u << OFPAT_SET_DL_DST) |
2464 (1u << OFPAT_SET_NW_SRC) |
2465 (1u << OFPAT_SET_NW_DST) |
2466 (1u << OFPAT_SET_NW_TOS) |
2467 (1u << OFPAT_SET_TP_SRC) |
2468 (1u << OFPAT_SET_TP_DST) |
2469 (1u << OFPAT_ENQUEUE));
2471 HMAP_FOR_EACH (port, hmap_node, &ofconn->ofproto->ports) {
2472 hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
2475 queue_tx(buf, ofconn, ofconn->reply_counter);
2480 handle_get_config_request(struct ofconn *ofconn, struct ofp_header *oh)
2483 struct ofp_switch_config *osc;
2484 struct ofpbuf *buf;
2485 uint16_t flags;
2486 bool drop_frags;
2487 /* Figure out flags. */
2488 dpif_get_drop_frags(ofconn->ofproto->dpif, &drop_frags);
2489 flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
2492 osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
2493 osc->flags = htons(flags);
2494 osc->miss_send_len = htons(ofconn->miss_send_len);
2495 queue_tx(buf, ofconn, ofconn->reply_counter);
2501 handle_set_config(struct ofconn *ofconn, struct ofp_switch_config *osc)
2506 error = check_ofp_message(&osc->header, OFPT_SET_CONFIG, sizeof *osc);
2510 flags = ntohs(osc->flags);
2512 if (ofconn->type == OFCONN_PRIMARY && ofconn->role != NX_ROLE_SLAVE) {
2513 switch (flags & OFPC_FRAG_MASK) {
2514 case OFPC_FRAG_NORMAL:
2515 dpif_set_drop_frags(ofconn->ofproto->dpif, false);
2516 break;
2517 case OFPC_FRAG_DROP:
2518 dpif_set_drop_frags(ofconn->ofproto->dpif, true);
2519 break;
2520 default:
2521 VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
2522 flags);
2527 ofconn->miss_send_len = ntohs(osc->miss_send_len);
2533 add_controller_action(struct odp_actions *actions, uint16_t max_len)
2535 union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER);
2536 a->controller.arg = max_len;
2539 struct action_xlate_ctx {
2540 /* Input. */
2541 struct flow flow; /* Flow to which these actions correspond. */
2542 int recurse; /* Recursion level, via xlate_table_action. */
2543 struct ofproto *ofproto;
2544 const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
2545 * null pointer if we are revalidating
2546 * without a packet to refer to. */
2548 /* Output. */
2549 struct odp_actions *out; /* Datapath actions. */
2550 tag_type tags; /* Tags associated with OFPP_NORMAL actions. */
2551 bool may_set_up_flow; /* True ordinarily; false if the actions must
2552 * be reassessed for every packet. */
2553 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
2556 /* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a
2557 * flow translation. */
2558 #define MAX_RESUBMIT_RECURSION 8
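/* Example (added annotation, not part of the original source): the recursion
 * limit exists because NXAST_RESUBMIT can form cycles.  A flow whose actions
 * resubmit to its own input port, e.g. (ovs-ofctl-like syntax, shown only
 * for illustration):
 *
 *     in_port=1, actions=resubmit:1
 *
 * would otherwise re-enter xlate_table_action() forever; with the limit, the
 * translation stops after MAX_RESUBMIT_RECURSION (8) nested lookups and logs
 * a rate-limited error instead. */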
2560 static void do_xlate_actions(const union ofp_action *in, size_t n_in,
2561 struct action_xlate_ctx *ctx);
2564 add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
2566 const struct ofport *ofport = get_port(ctx->ofproto, port);
2568 if (ofport) {
2569 if (ofport->opp.config & OFPPC_NO_FWD) {
2570 /* Forwarding disabled on port. */
2571 return;
2572 }
2573 } else {
2574 /*
2575 * We don't have an ofport record for this port, but it doesn't hurt to
2576 * allow forwarding to it anyhow. Maybe such a port will appear later
2577 * and we're pre-populating the flow table.
2578 */
2579 }
2581 odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
2582 ctx->nf_output_iface = port;
2585 static struct rule *
2586 rule_lookup(struct ofproto *ofproto, const struct flow *flow)
2588 return rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow,
2589 CLS_INC_ALL));
2590 }
2593 xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2595 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
2596 uint16_t old_in_port;
2597 struct rule *rule;
2599 /* Look up a flow with 'in_port' as the input port. Then restore the
2600 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
2601 * have surprising behavior). */
2602 old_in_port = ctx->flow.in_port;
2603 ctx->flow.in_port = in_port;
2604 rule = rule_lookup(ctx->ofproto, &ctx->flow);
2605 ctx->flow.in_port = old_in_port;
2607 if (rule) {
2608 ctx->recurse++;
2609 do_xlate_actions(rule->actions, rule->n_actions, ctx);
2610 ctx->recurse--;
2611 }
2612 } else {
2613 static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
2615 VLOG_ERR_RL(&recurse_rl, "NXAST_RESUBMIT recursed over %d times",
2616 MAX_RESUBMIT_RECURSION);
2621 flood_packets(struct ofproto *ofproto, uint16_t odp_in_port, uint32_t mask,
2622 uint16_t *nf_output_iface, struct odp_actions *actions)
2624 struct ofport *ofport;
2626 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) {
2627 uint16_t odp_port = ofport->odp_port;
2628 if (odp_port != odp_in_port && !(ofport->opp.config & mask)) {
2629 odp_actions_add(actions, ODPAT_OUTPUT)->output.port = odp_port;
2630 }
2631 }
2632 *nf_output_iface = NF_OUT_FLOOD;
2633 }
2636 xlate_output_action__(struct action_xlate_ctx *ctx,
2637 uint16_t port, uint16_t max_len)
2639 uint16_t odp_port;
2640 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2642 ctx->nf_output_iface = NF_OUT_DROP;
2644 switch (port) {
2645 case OFPP_IN_PORT:
2646 add_output_action(ctx, ctx->flow.in_port);
2647 break;
2648 case OFPP_TABLE:
2649 xlate_table_action(ctx, ctx->flow.in_port);
2650 break;
2651 case OFPP_NORMAL:
2652 if (!ctx->ofproto->ofhooks->normal_cb(&ctx->flow, ctx->packet,
2653 ctx->out, &ctx->tags,
2654 &ctx->nf_output_iface,
2655 ctx->ofproto->aux)) {
2656 COVERAGE_INC(ofproto_uninstallable);
2657 ctx->may_set_up_flow = false;
2658 }
2659 break;
2660 case OFPP_FLOOD:
2661 flood_packets(ctx->ofproto, ctx->flow.in_port, OFPPC_NO_FLOOD,
2662 &ctx->nf_output_iface, ctx->out);
2663 break;
2664 case OFPP_ALL:
2665 flood_packets(ctx->ofproto, ctx->flow.in_port, 0,
2666 &ctx->nf_output_iface, ctx->out);
2667 break;
2668 case OFPP_CONTROLLER:
2669 add_controller_action(ctx->out, max_len);
2670 break;
2671 case OFPP_LOCAL:
2672 add_output_action(ctx, ODPP_LOCAL);
2673 break;
2674 default:
2675 odp_port = ofp_port_to_odp_port(port);
2676 if (odp_port != ctx->flow.in_port) {
2677 add_output_action(ctx, odp_port);
2678 }
2679 break;
2680 }
2682 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2683 ctx->nf_output_iface = NF_OUT_FLOOD;
2684 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2685 ctx->nf_output_iface = prev_nf_output_iface;
2686 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2687 ctx->nf_output_iface != NF_OUT_FLOOD) {
2688 ctx->nf_output_iface = NF_OUT_MULTI;
2693 xlate_output_action(struct action_xlate_ctx *ctx,
2694 const struct ofp_action_output *oao)
2696 xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
2697 }
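/* Added annotation (not part of the original source): the NetFlow output
 * interface bookkeeping at the end of xlate_output_action__() implements a
 * simple combination rule, roughly:
 *
 *     previous \ new      none (DROP)   single port   FLOOD
 *     DROP                DROP          that port     FLOOD
 *     single port         that port     MULTI         FLOOD
 *     FLOOD               FLOOD         FLOOD         FLOOD
 *
 * i.e. FLOOD is sticky, a single output port is remembered exactly, and any
 * other combination of two or more outputs collapses to NF_OUT_MULTI. */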
2699 /* If the final ODP action in 'ctx' is "pop priority", drop it, as an
2700 * optimization, because we're going to add another action that sets the
2701 * priority immediately after, or because there are no actions following the
2702 * pop action. */
2704 remove_pop_action(struct action_xlate_ctx *ctx)
2706 size_t n = ctx->out->n_actions;
2707 if (n > 0 && ctx->out->actions[n - 1].type == ODPAT_POP_PRIORITY) {
2708 ctx->out->n_actions--;
2713 xlate_enqueue_action(struct action_xlate_ctx *ctx,
2714 const struct ofp_action_enqueue *oae)
2716 uint16_t ofp_port, odp_port;
2717 uint32_t priority;
2718 int error;
2720 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id),
2721 &priority);
2722 if (error) {
2723 /* Fall back to ordinary output action. */
2724 xlate_output_action__(ctx, ntohs(oae->port), 0);
2725 return;
2726 }
2728 /* Figure out ODP output port. */
2729 ofp_port = ntohs(oae->port);
2730 if (ofp_port != OFPP_IN_PORT) {
2731 odp_port = ofp_port_to_odp_port(ofp_port);
2732 } else {
2733 odp_port = ctx->flow.in_port;
2734 }
2736 /* Add ODP actions. */
2737 remove_pop_action(ctx);
2738 odp_actions_add(ctx->out, ODPAT_SET_PRIORITY)->priority.priority
2739 = priority;
2740 add_output_action(ctx, odp_port);
2741 odp_actions_add(ctx->out, ODPAT_POP_PRIORITY);
2743 /* Update NetFlow output port. */
2744 if (ctx->nf_output_iface == NF_OUT_DROP) {
2745 ctx->nf_output_iface = odp_port;
2746 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
2747 ctx->nf_output_iface = NF_OUT_MULTI;
2752 xlate_set_queue_action(struct action_xlate_ctx *ctx,
2753 const struct nx_action_set_queue *nasq)
2755 uint32_t priority;
2756 int error;
2758 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id),
2759 &priority);
2760 if (error) {
2761 /* Couldn't translate queue to a priority, so ignore. A warning
2762 * has already been logged. */
2763 return;
2764 }
2766 remove_pop_action(ctx);
2767 odp_actions_add(ctx->out, ODPAT_SET_PRIORITY)->priority.priority
2768 = priority;
2769 }
2772 xlate_set_dl_tci(struct action_xlate_ctx *ctx)
2774 ovs_be16 dl_vlan = ctx->flow.dl_vlan;
2775 uint8_t dl_vlan_pcp = ctx->flow.dl_vlan_pcp;
2777 if (dl_vlan == htons(OFP_VLAN_NONE)) {
2778 odp_actions_add(ctx->out, ODPAT_STRIP_VLAN);
2779 } else {
2780 union odp_action *oa = odp_actions_add(ctx->out, ODPAT_SET_DL_TCI);
2781 oa->dl_tci.tci = htons(ntohs(dl_vlan & htons(VLAN_VID_MASK))
2782 | (dl_vlan_pcp << VLAN_PCP_SHIFT)
2783 | VLAN_CFI);
2784 }
2785 }
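/* Added annotation (not part of the original source): a worked example of the
 * TCI computation above.  For dl_vlan = 17 and dl_vlan_pcp = 5, with
 * VLAN_PCP_SHIFT = 13, the host-order TCI is
 *
 *     17 | (5 << 13) = 0x0011 | 0xa000 = 0xa011
 *
 * (plus the CFI bit, if the datapath expects it set), which htons() then
 * stores in network byte order. */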
2788 xlate_reg_move_action(struct action_xlate_ctx *ctx,
2789 const struct nx_action_reg_move *narm)
2791 ovs_be16 old_vlan = ctx->flow.dl_vlan;
2792 uint8_t old_pcp = ctx->flow.dl_vlan_pcp;
2794 nxm_execute_reg_move(narm, &ctx->flow);
2796 if (ctx->flow.dl_vlan != old_vlan || ctx->flow.dl_vlan_pcp != old_pcp) {
2797 xlate_set_dl_tci(ctx);
2802 xlate_nicira_action(struct action_xlate_ctx *ctx,
2803 const struct nx_action_header *nah)
2805 const struct nx_action_resubmit *nar;
2806 const struct nx_action_set_tunnel *nast;
2807 const struct nx_action_set_queue *nasq;
2808 union odp_action *oa;
2809 int subtype = ntohs(nah->subtype);
2811 assert(nah->vendor == htonl(NX_VENDOR_ID));
2812 switch (subtype) {
2813 case NXAST_RESUBMIT:
2814 nar = (const struct nx_action_resubmit *) nah;
2815 xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
2818 case NXAST_SET_TUNNEL:
2819 nast = (const struct nx_action_set_tunnel *) nah;
2820 oa = odp_actions_add(ctx->out, ODPAT_SET_TUNNEL);
2821 ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;
2824 case NXAST_DROP_SPOOFED_ARP:
2825 if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) {
2826 odp_actions_add(ctx->out, ODPAT_DROP_SPOOFED_ARP);
2830 case NXAST_SET_QUEUE:
2831 nasq = (const struct nx_action_set_queue *) nah;
2832 xlate_set_queue_action(ctx, nasq);
2835 case NXAST_POP_QUEUE:
2836 odp_actions_add(ctx->out, ODPAT_POP_PRIORITY);
2839 case NXAST_REG_MOVE:
2840 xlate_reg_move_action(ctx, (const struct nx_action_reg_move *) nah);
2843 case NXAST_REG_LOAD:
2844 nxm_execute_reg_load((const struct nx_action_reg_load *) nah,
2845 &ctx->flow);
2846 break;
2848 /* If you add a new action here that modifies flow data, don't forget to
2849 * update the flow key in ctx->flow at the same time. */
2852 VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
2858 do_xlate_actions(const union ofp_action *in, size_t n_in,
2859 struct action_xlate_ctx *ctx)
2861 struct actions_iterator iter;
2862 const union ofp_action *ia;
2863 const struct ofport *port;
2865 port = get_port(ctx->ofproto, ctx->flow.in_port);
2866 if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
2867 port->opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
2868 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
2869 /* Drop this flow. */
2870 return;
2871 }
2873 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
2874 uint16_t type = ntohs(ia->type);
2875 union odp_action *oa;
2877 switch (type) {
2878 case OFPAT_OUTPUT:
2879 xlate_output_action(ctx, &ia->output);
2880 break;
2882 case OFPAT_SET_VLAN_VID:
2883 ctx->flow.dl_vlan = ia->vlan_vid.vlan_vid;
2884 xlate_set_dl_tci(ctx);
2887 case OFPAT_SET_VLAN_PCP:
2888 ctx->flow.dl_vlan_pcp = ia->vlan_pcp.vlan_pcp;
2889 xlate_set_dl_tci(ctx);
2892 case OFPAT_STRIP_VLAN:
2893 ctx->flow.dl_vlan = htons(OFP_VLAN_NONE);
2894 ctx->flow.dl_vlan_pcp = 0;
2895 xlate_set_dl_tci(ctx);
2898 case OFPAT_SET_DL_SRC:
2899 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_SRC);
2900 memcpy(oa->dl_addr.dl_addr,
2901 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2902 memcpy(ctx->flow.dl_src,
2903 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2906 case OFPAT_SET_DL_DST:
2907 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_DST);
2908 memcpy(oa->dl_addr.dl_addr,
2909 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2910 memcpy(ctx->flow.dl_dst,
2911 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2914 case OFPAT_SET_NW_SRC:
2915 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_SRC);
2916 ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2919 case OFPAT_SET_NW_DST:
2920 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
2921 ctx->flow.nw_dst = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2924 case OFPAT_SET_NW_TOS:
2925 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_TOS);
2926 ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
2929 case OFPAT_SET_TP_SRC:
2930 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC);
2931 ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port;
2934 case OFPAT_SET_TP_DST:
2935 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST);
2936 ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port;
2939 case OFPAT_VENDOR:
2940 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
2941 break;
2943 case OFPAT_ENQUEUE:
2944 xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
2945 break;
2948 VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
2955 xlate_actions(const union ofp_action *in, size_t n_in,
2956 const struct flow *flow, struct ofproto *ofproto,
2957 const struct ofpbuf *packet,
2958 struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
2959 uint16_t *nf_output_iface)
2961 struct action_xlate_ctx ctx;
2963 COVERAGE_INC(ofproto_ofp2odp);
2964 odp_actions_init(out);
2965 ctx.flow = *flow;
2966 ctx.recurse = 0;
2967 ctx.ofproto = ofproto;
2968 ctx.packet = packet;
2969 ctx.out = out;
2970 ctx.tags = 0;
2971 ctx.may_set_up_flow = true;
2972 ctx.nf_output_iface = NF_OUT_DROP;
2973 do_xlate_actions(in, n_in, &ctx);
2974 remove_pop_action(&ctx);
2976 /* Check with in-band control to see if we're allowed to set up this
2977 * flow. */
2978 if (!in_band_rule_check(ofproto->in_band, flow, out)) {
2979 ctx.may_set_up_flow = false;
2980 }
2982 if (tags) {
2983 *tags |= ctx.tags;
2984 }
2985 if (may_set_up_flow) {
2986 *may_set_up_flow = ctx.may_set_up_flow;
2987 }
2988 if (nf_output_iface) {
2989 *nf_output_iface = ctx.nf_output_iface;
2990 }
2991 if (odp_actions_overflow(out)) {
2992 COVERAGE_INC(odp_overflow);
2993 odp_actions_init(out);
2994 return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
2995 }
2996 return 0;
2997 }
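/* Example (added annotation, not part of the original source): a minimal
 * caller of xlate_actions(), ignoring the optional out-parameters, much as
 * rule_execute() does:
 *
 *     struct odp_actions a;
 *     if (!xlate_actions(rule->actions, rule->n_actions, &flow, ofproto,
 *                        packet, &a, NULL, NULL, NULL)) {
 *         ...a.actions[0..a.n_actions-1] are ready for dpif_execute()...
 *     }
 *
 * A nonzero return value is an OpenFlow error code built with ofp_mkerr(). */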
2999 /* Checks whether 'ofconn' is a slave controller. If so, returns an OpenFlow
3000 * error message code (composed with ofp_mkerr()) for the caller to propagate
3001 * upward. Otherwise, returns 0.
3003 * The log message mentions 'msg_type'. */
3005 reject_slave_controller(struct ofconn *ofconn, const char *msg_type)
3007 if (ofconn->type == OFCONN_PRIMARY && ofconn->role == NX_ROLE_SLAVE) {
3008 static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5);
3009 VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller",
3012 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
3019 handle_packet_out(struct ofconn *ofconn, struct ofp_header *oh)
3021 struct ofproto *p = ofconn->ofproto;
3022 struct ofp_packet_out *opo;
3023 struct ofpbuf payload, *buffer;
3024 union ofp_action *ofp_actions;
3025 struct odp_actions odp_actions;
3026 struct ofpbuf request;
3028 size_t n_ofp_actions;
3029 struct flow flow;
3030 uint16_t in_port;
3031 int error;
3032 COVERAGE_INC(ofproto_packet_out);
3034 error = reject_slave_controller(ofconn, "OFPT_PACKET_OUT");
3039 /* Get ofp_packet_out. */
3040 request.data = oh;
3041 request.size = ntohs(oh->length);
3042 opo = ofpbuf_try_pull(&request, offsetof(struct ofp_packet_out, actions));
3043 if (!opo) {
3044 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3045 }
3048 error = ofputil_pull_actions(&request, ntohs(opo->actions_len),
3049 &ofp_actions, &n_ofp_actions);
3055 if (opo->buffer_id != htonl(UINT32_MAX)) {
3056 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
3057 &buffer, &in_port);
3058 if (error || !buffer) {
3067 /* Extract flow, check actions. */
3068 flow_extract(&payload, 0, ofp_port_to_odp_port(ntohs(opo->in_port)),
3069 &flow);
3070 error = validate_actions(ofp_actions, n_ofp_actions, &flow, p->max_ports);
3076 error = xlate_actions(ofp_actions, n_ofp_actions, &flow, p, &payload,
3077 &odp_actions, NULL, NULL, NULL);
3078 if (!error) {
3079 dpif_execute(p->dpif, odp_actions.actions, odp_actions.n_actions,
3080 &payload);
3081 }
3084 ofpbuf_delete(buffer);
3089 update_port_config(struct ofproto *p, struct ofport *port,
3090 uint32_t config, uint32_t mask)
3092 mask &= config ^ port->opp.config;
3093 if (mask & OFPPC_PORT_DOWN) {
3094 if (config & OFPPC_PORT_DOWN) {
3095 netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
3096 } else {
3097 netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
3098 }
3099 }
3100 #define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | \
3101 OFPPC_NO_FWD | OFPPC_NO_FLOOD)
3102 if (mask & REVALIDATE_BITS) {
3103 COVERAGE_INC(ofproto_costly_flags);
3104 port->opp.config ^= mask & REVALIDATE_BITS;
3105 p->need_revalidate = true;
3107 #undef REVALIDATE_BITS
3108 if (mask & OFPPC_NO_PACKET_IN) {
3109 port->opp.config ^= OFPPC_NO_PACKET_IN;
3114 handle_port_mod(struct ofconn *ofconn, struct ofp_header *oh)
3116 struct ofproto *p = ofconn->ofproto;
3117 const struct ofp_port_mod *opm;
3118 struct ofport *port;
3119 int error;
3121 error = reject_slave_controller(ofconn, "OFPT_PORT_MOD");
3125 error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm);
3129 opm = (struct ofp_port_mod *) oh;
3131 port = get_port(p, ofp_port_to_odp_port(ntohs(opm->port_no)));
3132 if (!port) {
3133 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
3134 } else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
3135 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
3136 }
3137 update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
3138 if (opm->advertise) {
3139 netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
3145 static struct ofpbuf *
3146 make_ofp_stats_reply(ovs_be32 xid, ovs_be16 type, size_t body_len)
3148 struct ofp_stats_reply *osr;
3149 struct ofpbuf *msg;
3151 msg = ofpbuf_new(MIN(sizeof *osr + body_len, UINT16_MAX));
3152 osr = put_openflow_xid(sizeof *osr, OFPT_STATS_REPLY, xid, msg);
3153 osr->type = type;
3154 osr->flags = htons(0);
3156 return msg;
3158 static struct ofpbuf *
3159 start_ofp_stats_reply(const struct ofp_stats_request *request, size_t body_len)
3161 return make_ofp_stats_reply(request->header.xid, request->type, body_len);
3165 append_ofp_stats_reply(size_t nbytes, struct ofconn *ofconn,
3166 struct ofpbuf **msgp)
3168 struct ofpbuf *msg = *msgp;
3169 assert(nbytes <= UINT16_MAX - sizeof(struct ofp_stats_reply));
3170 if (nbytes + msg->size > UINT16_MAX) {
3171 struct ofp_stats_reply *reply = msg->data;
3172 reply->flags = htons(OFPSF_REPLY_MORE);
3173 *msgp = make_ofp_stats_reply(reply->header.xid, reply->type, nbytes);
3174 queue_tx(msg, ofconn, ofconn->reply_counter);
3176 return ofpbuf_put_uninit(*msgp, nbytes);
3177 }
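/* Added annotation (not part of the original source): append_ofp_stats_reply()
 * is what gives OpenFlow statistics replies their multipart behavior.  Each
 * reply message is capped at UINT16_MAX bytes; when the next 'nbytes' would
 * not fit, the current buffer is flagged OFPSF_REPLY_MORE, queued to the
 * controller, and a fresh reply with the same xid and type is started.  A
 * typical caller therefore just loops, as a sketch:
 *
 *     msg = start_ofp_stats_reply(request, 1024);
 *     for each item {
 *         reply = append_ofp_stats_reply(sizeof *reply, ofconn, &msg);
 *         ...fill in *reply...
 *     }
 *     queue_tx(msg, ofconn, ofconn->reply_counter);
 */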
3179 static struct ofpbuf *
3180 make_nxstats_reply(ovs_be32 xid, ovs_be32 subtype, size_t body_len)
3182 struct nicira_stats_msg *nsm;
3183 struct ofpbuf *msg;
3185 msg = ofpbuf_new(MIN(sizeof *nsm + body_len, UINT16_MAX));
3186 nsm = put_openflow_xid(sizeof *nsm, OFPT_STATS_REPLY, xid, msg);
3187 nsm->type = htons(OFPST_VENDOR);
3188 nsm->flags = htons(0);
3189 nsm->vendor = htonl(NX_VENDOR_ID);
3190 nsm->subtype = htonl(subtype);
3192 return msg;
3194 static struct ofpbuf *
3195 start_nxstats_reply(const struct nicira_stats_msg *request, size_t body_len)
3197 return make_nxstats_reply(request->header.xid, request->subtype, body_len);
3201 append_nxstats_reply(size_t nbytes, struct ofconn *ofconn,
3202 struct ofpbuf **msgp)
3204 struct ofpbuf *msg = *msgp;
3205 assert(nbytes <= UINT16_MAX - sizeof(struct nicira_stats_msg));
3206 if (nbytes + msg->size > UINT16_MAX) {
3207 struct nicira_stats_msg *reply = msg->data;
3208 reply->flags = htons(OFPSF_REPLY_MORE);
3209 *msgp = make_nxstats_reply(reply->header.xid, reply->subtype, nbytes);
3210 queue_tx(msg, ofconn, ofconn->reply_counter);
3212 ofpbuf_prealloc_tailroom(*msgp, nbytes);
3216 handle_desc_stats_request(struct ofconn *ofconn,
3217 struct ofp_stats_request *request)
3219 struct ofproto *p = ofconn->ofproto;
3220 struct ofp_desc_stats *ods;
3221 struct ofpbuf *msg;
3223 msg = start_ofp_stats_reply(request, sizeof *ods);
3224 ods = append_ofp_stats_reply(sizeof *ods, ofconn, &msg);
3225 memset(ods, 0, sizeof *ods);
3226 ovs_strlcpy(ods->mfr_desc, p->mfr_desc, sizeof ods->mfr_desc);
3227 ovs_strlcpy(ods->hw_desc, p->hw_desc, sizeof ods->hw_desc);
3228 ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc);
3229 ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num);
3230 ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc);
3231 queue_tx(msg, ofconn, ofconn->reply_counter);
3237 handle_table_stats_request(struct ofconn *ofconn,
3238 struct ofp_stats_request *request)
3240 struct ofproto *p = ofconn->ofproto;
3241 struct ofp_table_stats *ots;
3242 struct ofpbuf *msg;
3244 msg = start_ofp_stats_reply(request, sizeof *ots * 2);
3246 /* Classifier table. */
3247 ots = append_ofp_stats_reply(sizeof *ots, ofconn, &msg);
3248 memset(ots, 0, sizeof *ots);
3249 strcpy(ots->name, "classifier");
3250 ots->wildcards = (ofconn->flow_format == NXFF_OPENFLOW10
3251 ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL));
3252 ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. */
3253 ots->active_count = htonl(classifier_count(&p->cls));
3254 ots->lookup_count = htonll(0); /* XXX */
3255 ots->matched_count = htonll(0); /* XXX */
3257 queue_tx(msg, ofconn, ofconn->reply_counter);
3262 append_port_stat(struct ofport *port, struct ofconn *ofconn,
3263 struct ofpbuf **msgp)
3265 struct netdev_stats stats;
3266 struct ofp_port_stats *ops;
3268 /* Intentionally ignore return value, since errors will set
3269 * 'stats' to all-1s, which is correct for OpenFlow, and
3270 * netdev_get_stats() will log errors. */
3271 netdev_get_stats(port->netdev, &stats);
3273 ops = append_ofp_stats_reply(sizeof *ops, ofconn, msgp);
3274 ops->port_no = htons(port->opp.port_no);
3275 memset(ops->pad, 0, sizeof ops->pad);
3276 ops->rx_packets = htonll(stats.rx_packets);
3277 ops->tx_packets = htonll(stats.tx_packets);
3278 ops->rx_bytes = htonll(stats.rx_bytes);
3279 ops->tx_bytes = htonll(stats.tx_bytes);
3280 ops->rx_dropped = htonll(stats.rx_dropped);
3281 ops->tx_dropped = htonll(stats.tx_dropped);
3282 ops->rx_errors = htonll(stats.rx_errors);
3283 ops->tx_errors = htonll(stats.tx_errors);
3284 ops->rx_frame_err = htonll(stats.rx_frame_errors);
3285 ops->rx_over_err = htonll(stats.rx_over_errors);
3286 ops->rx_crc_err = htonll(stats.rx_crc_errors);
3287 ops->collisions = htonll(stats.collisions);
3291 handle_port_stats_request(struct ofconn *ofconn, struct ofp_stats_request *osr,
3292 size_t arg_size)
3294 struct ofproto *p = ofconn->ofproto;
3295 struct ofp_port_stats_request *psr;
3296 struct ofp_port_stats *ops;
3297 struct ofpbuf *msg;
3298 struct ofport *port;
3300 if (arg_size != sizeof *psr) {
3301 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3303 psr = (struct ofp_port_stats_request *) osr->body;
3305 msg = start_ofp_stats_reply(osr, sizeof *ops * 16);
3306 if (psr->port_no != htons(OFPP_NONE)) {
3307 port = get_port(p, ofp_port_to_odp_port(ntohs(psr->port_no)));
3308 if (port) {
3309 append_port_stat(port, ofconn, &msg);
3310 }
3311 } else {
3312 HMAP_FOR_EACH (port, hmap_node, &p->ports) {
3313 append_port_stat(port, ofconn, &msg);
3317 queue_tx(msg, ofconn, ofconn->reply_counter);
3321 struct flow_stats_cbdata {
3322 struct ofconn *ofconn;
3323 ovs_be16 out_port;
3324 struct ofpbuf *msg;
3325 };
3327 /* Obtains statistic counters for 'rule' within 'p' and stores them into
3328 * '*packet_countp' and '*byte_countp'. The returned statistics include
3329 * statistics for all of 'rule''s facets. */
3331 query_stats(struct ofproto *p, struct rule *rule,
3332 uint64_t *packet_countp, uint64_t *byte_countp)
3334 uint64_t packet_count, byte_count;
3335 struct facet *facet;
3336 struct odp_flow *odp_flows;
3337 size_t n_odp_flows;
3339 /* Start from historical data for 'rule' itself that are no longer tracked
3340 * by the datapath. This counts, for example, facets that have expired. */
3341 packet_count = rule->packet_count;
3342 byte_count = rule->byte_count;
3344 /* Prepare to ask the datapath for statistics on all of the rule's facets.
3346 * Also, add any statistics that are not tracked by the datapath for each
3347 * facet. This includes, for example, statistics for packets that were
3348 * executed "by hand" by ofproto via dpif_execute() but must be accounted
3350 odp_flows = xzalloc(list_size(&rule->facets) * sizeof *odp_flows);
3352 LIST_FOR_EACH (facet, list_node, &rule->facets) {
3353 struct odp_flow *odp_flow = &odp_flows[n_odp_flows++];
3354 odp_flow_key_from_flow(&odp_flow->key, &facet->flow);
3355 packet_count += facet->packet_count;
3356 byte_count += facet->byte_count;
3359 /* Fetch up-to-date statistics from the datapath and add them in. */
3360 if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) {
3361 size_t i;
3363 for (i = 0; i < n_odp_flows; i++) {
3364 struct odp_flow *odp_flow = &odp_flows[i];
3365 packet_count += odp_flow->stats.n_packets;
3366 byte_count += odp_flow->stats.n_bytes;
3367 }
3368 }
3369 free(odp_flows);
3371 /* Return the stats to the caller. */
3372 *packet_countp = packet_count;
3373 *byte_countp = byte_count;
3374 }
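/* Added annotation (not part of the original source): a worked example of the
 * three statistics sources summed above.  Suppose a rule has historical
 * counters of 100 packets, one facet holding 20 packets not yet folded into
 * the rule, and the datapath reports 5 further packets for that facet's flow:
 * query_stats() then reports 100 + 20 + 5 = 125 packets, and no packet is
 * counted twice because each layer only holds what the layer below it has not
 * yet absorbed. */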
3377 calc_flow_duration(long long int start, ovs_be32 *sec, ovs_be32 *nsec)
3379 long long int msecs = time_msec() - start;
3380 *sec = htonl(msecs / 1000);
3381 *nsec = htonl((msecs % 1000) * (1000 * 1000));
3382 }
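/* Added annotation (not part of the original source): for example, a flow
 * created 1500 ms ago yields *sec = htonl(1) and *nsec = htonl(500000000),
 * since 1500 / 1000 = 1 and (1500 % 1000) * 1000000 = 500000000. */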
3385 flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
3387 struct rule *rule = rule_from_cls_rule(rule_);
3388 struct flow_stats_cbdata *cbdata = cbdata_;
3389 struct ofp_flow_stats *ofs;
3390 uint64_t packet_count, byte_count;
3391 size_t act_len, len;
3393 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
3394 return;
3395 }
3397 act_len = sizeof *rule->actions * rule->n_actions;
3398 len = offsetof(struct ofp_flow_stats, actions) + act_len;
3400 query_stats(cbdata->ofconn->ofproto, rule, &packet_count, &byte_count);
3402 ofs = append_ofp_stats_reply(len, cbdata->ofconn, &cbdata->msg);
3403 ofs->length = htons(len);
3404 ofs->table_id = 0;
3406 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
3407 cbdata->ofconn->flow_format, &ofs->match);
3408 calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec);
3409 ofs->cookie = rule->flow_cookie;
3410 ofs->priority = htons(rule->cr.priority);
3411 ofs->idle_timeout = htons(rule->idle_timeout);
3412 ofs->hard_timeout = htons(rule->hard_timeout);
3413 memset(ofs->pad2, 0, sizeof ofs->pad2);
3414 ofs->packet_count = htonll(packet_count);
3415 ofs->byte_count = htonll(byte_count);
3416 if (rule->n_actions > 0) {
3417 memcpy(ofs->actions, rule->actions, act_len);
3422 table_id_to_include(uint8_t table_id)
3424 return table_id == 0 || table_id == 0xff ? CLS_INC_ALL : 0;
3428 handle_flow_stats_request(struct ofconn *ofconn,
3429 const struct ofp_stats_request *osr, size_t arg_size)
3431 struct ofp_flow_stats_request *fsr;
3432 struct flow_stats_cbdata cbdata;
3433 struct cls_rule target;
3435 if (arg_size != sizeof *fsr) {
3436 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3438 fsr = (struct ofp_flow_stats_request *) osr->body;
3440 COVERAGE_INC(ofproto_flows_req);
3441 cbdata.ofconn = ofconn;
3442 cbdata.out_port = fsr->out_port;
3443 cbdata.msg = start_ofp_stats_reply(osr, 1024);
3444 cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0, &target);
3445 classifier_for_each_match(&ofconn->ofproto->cls, &target,
3446 table_id_to_include(fsr->table_id),
3447 flow_stats_cb, &cbdata);
3448 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3453 nx_flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
3455 struct rule *rule = rule_from_cls_rule(rule_);
3456 struct flow_stats_cbdata *cbdata = cbdata_;
3457 struct nx_flow_stats *nfs;
3458 uint64_t packet_count, byte_count;
3459 size_t act_len, start_len;
3461 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
3462 return;
3463 }
3465 query_stats(cbdata->ofconn->ofproto, rule, &packet_count, &byte_count);
3467 act_len = sizeof *rule->actions * rule->n_actions;
3469 start_len = cbdata->msg->size;
3470 append_nxstats_reply(sizeof *nfs + NXM_MAX_LEN + act_len,
3471 cbdata->ofconn, &cbdata->msg);
3472 nfs = ofpbuf_put_uninit(cbdata->msg, sizeof *nfs);
3475 calc_flow_duration(rule->created, &nfs->duration_sec, &nfs->duration_nsec);
3476 nfs->cookie = rule->flow_cookie;
3477 nfs->priority = htons(rule->cr.priority);
3478 nfs->idle_timeout = htons(rule->idle_timeout);
3479 nfs->hard_timeout = htons(rule->hard_timeout);
3480 nfs->match_len = htons(nx_put_match(cbdata->msg, &rule->cr));
3481 memset(nfs->pad2, 0, sizeof nfs->pad2);
3482 nfs->packet_count = htonll(packet_count);
3483 nfs->byte_count = htonll(byte_count);
3484 if (rule->n_actions > 0) {
3485 ofpbuf_put(cbdata->msg, rule->actions, act_len);
3487 nfs->length = htons(cbdata->msg->size - start_len);
3491 handle_nxst_flow(struct ofconn *ofconn, struct ofpbuf *b)
3493 struct nx_flow_stats_request *nfsr;
3494 struct flow_stats_cbdata cbdata;
3495 struct cls_rule target;
3496 int error;
3498 /* Dissect the message. */
3499 nfsr = ofpbuf_try_pull(b, sizeof *nfsr);
3500 if (!nfsr) {
3501 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3502 }
3503 error = nx_pull_match(b, ntohs(nfsr->match_len), 0, &target);
3508 COVERAGE_INC(ofproto_flows_req);
3509 cbdata.ofconn = ofconn;
3510 cbdata.out_port = nfsr->out_port;
3511 cbdata.msg = start_nxstats_reply(&nfsr->nsm, 1024);
3512 classifier_for_each_match(&ofconn->ofproto->cls, &target,
3513 table_id_to_include(nfsr->table_id),
3514 nx_flow_stats_cb, &cbdata);
3515 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3519 struct flow_stats_ds_cbdata {
3520 struct ofproto *ofproto;
3521 struct ds *results;
3522 };
3525 flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
3527 struct rule *rule = rule_from_cls_rule(rule_);
3528 struct flow_stats_ds_cbdata *cbdata = cbdata_;
3529 struct ds *results = cbdata->results;
3530 struct ofp_match match;
3531 uint64_t packet_count, byte_count;
3532 size_t act_len = sizeof *rule->actions * rule->n_actions;
3534 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
3535 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
3536 NXFF_OPENFLOW10, &match);
3538 ds_put_format(results, "duration=%llds, ",
3539 (time_msec() - rule->created) / 1000);
3540 ds_put_format(results, "priority=%u, ", rule->cr.priority);
3541 ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
3542 ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
3543 ofp_print_match(results, &match, true);
3545 ofp_print_actions(results, &rule->actions->header, act_len);
3547 ds_put_cstr(results, "drop");
3549 ds_put_cstr(results, "\n");
3552 /* Adds a pretty-printed description of all flows to 'results', including
3553 * those marked hidden by secchan (e.g., by in-band control). */
3555 ofproto_get_all_flows(struct ofproto *p, struct ds *results)
3557 struct ofp_match match;
3558 struct cls_rule target;
3559 struct flow_stats_ds_cbdata cbdata;
3561 memset(&match, 0, sizeof match);
3562 match.wildcards = htonl(OVSFW_ALL);
3564 cbdata.ofproto = p;
3565 cbdata.results = results;
3567 cls_rule_from_match(&match, 0, NXFF_OPENFLOW10, 0, &target);
3568 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
3569 flow_stats_ds_cb, &cbdata);
3572 struct aggregate_stats_cbdata {
3573 struct ofproto *ofproto;
3574 ovs_be16 out_port;
3575 uint64_t packet_count;
3576 uint64_t byte_count;
3577 uint32_t n_flows;
3578 };
3581 aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
3583 struct rule *rule = rule_from_cls_rule(rule_);
3584 struct aggregate_stats_cbdata *cbdata = cbdata_;
3585 uint64_t packet_count, byte_count;
3587 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
3588 return;
3589 }
3591 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
3593 cbdata->packet_count += packet_count;
3594 cbdata->byte_count += byte_count;
3595 cbdata->n_flows++;
3596 }
3599 query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target,
3600 ovs_be16 out_port, uint8_t table_id,
3601 struct ofp_aggregate_stats_reply *oasr)
3603 struct aggregate_stats_cbdata cbdata;
3605 COVERAGE_INC(ofproto_agg_request);
3606 cbdata.ofproto = ofproto;
3607 cbdata.out_port = out_port;
3608 cbdata.packet_count = 0;
3609 cbdata.byte_count = 0;
3610 cbdata.n_flows = 0;
3611 classifier_for_each_match(&ofproto->cls, target,
3612 table_id_to_include(table_id),
3613 aggregate_stats_cb, &cbdata);
3615 oasr->flow_count = htonl(cbdata.n_flows);
3616 oasr->packet_count = htonll(cbdata.packet_count);
3617 oasr->byte_count = htonll(cbdata.byte_count);
3618 memset(oasr->pad, 0, sizeof oasr->pad);
3622 handle_aggregate_stats_request(struct ofconn *ofconn,
3623 const struct ofp_stats_request *osr,
3626 struct ofp_aggregate_stats_request *request;
3627 struct ofp_aggregate_stats_reply *reply;
3628 struct cls_rule target;
3629 struct ofpbuf *msg;
3631 if (arg_size != sizeof *request) {
3632 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3634 request = (struct ofp_aggregate_stats_request *) osr->body;
3636 cls_rule_from_match(&request->match, 0, NXFF_OPENFLOW10, 0, &target);
3638 msg = start_ofp_stats_reply(osr, sizeof *reply);
3639 reply = append_ofp_stats_reply(sizeof *reply, ofconn, &msg);
3640 query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
3641 request->table_id, reply);
3642 queue_tx(msg, ofconn, ofconn->reply_counter);
3647 handle_nxst_aggregate(struct ofconn *ofconn, struct ofpbuf *b)
3649 struct nx_aggregate_stats_request *request;
3650 struct ofp_aggregate_stats_reply *reply;
3651 struct cls_rule target;
3652 struct ofpbuf *buf;
3653 int error;
3655 /* Dissect the message. */
3656 request = ofpbuf_try_pull(b, sizeof *request);
3657 if (!request) {
3658 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3659 }
3660 error = nx_pull_match(b, ntohs(request->match_len), 0, &target);
3666 COVERAGE_INC(ofproto_flows_req);
3667 buf = start_nxstats_reply(&request->nsm, sizeof *reply);
3668 reply = ofpbuf_put_uninit(buf, sizeof *reply);
3669 query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
3670 request->table_id, reply);
3671 queue_tx(buf, ofconn, ofconn->reply_counter);
3676 struct queue_stats_cbdata {
3677 struct ofconn *ofconn;
3678 struct ofport *ofport;
3679 struct ofpbuf *msg;
3680 };
3683 put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id,
3684 const struct netdev_queue_stats *stats)
3686 struct ofp_queue_stats *reply;
3688 reply = append_ofp_stats_reply(sizeof *reply, cbdata->ofconn, &cbdata->msg);
3689 reply->port_no = htons(cbdata->ofport->opp.port_no);
3690 memset(reply->pad, 0, sizeof reply->pad);
3691 reply->queue_id = htonl(queue_id);
3692 reply->tx_bytes = htonll(stats->tx_bytes);
3693 reply->tx_packets = htonll(stats->tx_packets);
3694 reply->tx_errors = htonll(stats->tx_errors);
3698 handle_queue_stats_dump_cb(uint32_t queue_id,
3699 struct netdev_queue_stats *stats,
3700 void *cbdata_)
3702 struct queue_stats_cbdata *cbdata = cbdata_;
3704 put_queue_stats(cbdata, queue_id, stats);
3708 handle_queue_stats_for_port(struct ofport *port, uint32_t queue_id,
3709 struct queue_stats_cbdata *cbdata)
3711 cbdata->ofport = port;
3712 if (queue_id == OFPQ_ALL) {
3713 netdev_dump_queue_stats(port->netdev,
3714 handle_queue_stats_dump_cb, cbdata);
3715 } else {
3716 struct netdev_queue_stats stats;
3718 if (!netdev_get_queue_stats(port->netdev, queue_id, &stats)) {
3719 put_queue_stats(cbdata, queue_id, &stats);
3725 handle_queue_stats_request(struct ofconn *ofconn,
3726 const struct ofp_stats_request *osr,
3729 struct ofproto *ofproto = ofconn->ofproto;
3730 struct ofp_queue_stats_request *qsr;
3731 struct queue_stats_cbdata cbdata;
3732 struct ofport *port;
3733 unsigned int port_no;
3734 uint32_t queue_id;
3736 if (arg_size != sizeof *qsr) {
3737 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3739 qsr = (struct ofp_queue_stats_request *) osr->body;
3741 COVERAGE_INC(ofproto_queue_req);
3743 cbdata.ofconn = ofconn;
3744 cbdata.msg = start_ofp_stats_reply(osr, 128);
3746 port_no = ntohs(qsr->port_no);
3747 queue_id = ntohl(qsr->queue_id);
3748 if (port_no == OFPP_ALL) {
3749 HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
3750 handle_queue_stats_for_port(port, queue_id, &cbdata);
3752 } else if (port_no < ofproto->max_ports) {
3753 port = get_port(ofproto, ofp_port_to_odp_port(port_no));
3754 if (port) {
3755 handle_queue_stats_for_port(port, queue_id, &cbdata);
3756 }
3757 } else {
3758 ofpbuf_delete(cbdata.msg);
3759 return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT);
3760 }
3761 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3767 handle_vendor_stats_request(struct ofconn *ofconn,
3768 struct ofp_stats_request *osr, size_t arg_size)
3770 struct nicira_stats_msg *nsm;
3771 struct ofpbuf b;
3772 ovs_be32 vendor;
3774 if (arg_size < 4) {
3775 VLOG_WARN_RL(&rl, "truncated vendor stats request body");
3776 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3779 memcpy(&vendor, osr->body, sizeof vendor);
3780 if (vendor != htonl(NX_VENDOR_ID)) {
3781 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3784 if (ntohs(osr->header.length) < sizeof(struct nicira_stats_msg)) {
3785 VLOG_WARN_RL(&rl, "truncated Nicira stats request");
3786 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3789 nsm = (struct nicira_stats_msg *) osr;
3790 b.data = nsm;
3791 b.size = ntohs(nsm->header.length);
3792 switch (ntohl(nsm->subtype)) {
3793 case NXST_FLOW:
3794 return handle_nxst_flow(ofconn, &b);
3796 case NXST_AGGREGATE:
3797 return handle_nxst_aggregate(ofconn, &b);
3799 default:
3800 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3805 handle_stats_request(struct ofconn *ofconn, struct ofp_header *oh)
3807 struct ofp_stats_request *osr;
3808 size_t arg_size;
3809 int error;
3811 error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
3812 1, &arg_size);
3816 osr = (struct ofp_stats_request *) oh;
3818 switch (ntohs(osr->type)) {
3819 case OFPST_DESC:
3820 return handle_desc_stats_request(ofconn, osr);
3822 case OFPST_FLOW:
3823 return handle_flow_stats_request(ofconn, osr, arg_size);
3825 case OFPST_AGGREGATE:
3826 return handle_aggregate_stats_request(ofconn, osr, arg_size);
3828 case OFPST_TABLE:
3829 return handle_table_stats_request(ofconn, osr);
3831 case OFPST_PORT:
3832 return handle_port_stats_request(ofconn, osr, arg_size);
3834 case OFPST_QUEUE:
3835 return handle_queue_stats_request(ofconn, osr, arg_size);
3837 case OFPST_VENDOR:
3838 return handle_vendor_stats_request(ofconn, osr, arg_size);
3840 default:
3841 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
3845 static long long int
3846 msec_from_nsec(uint64_t sec, uint32_t nsec)
3848 return !sec ? 0 : sec * 1000 + nsec / 1000000;
3849 }
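/* Added annotation (not part of the original source): for example,
 * msec_from_nsec(2, 500000000) = 2 * 1000 + 500000000 / 1000000 = 2500 ms.
 * A zero 'sec' means "never used", so the function deliberately maps it to 0
 * rather than deriving a small nonzero value from 'nsec' alone. */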
3852 facet_update_time(struct ofproto *ofproto, struct facet *facet,
3853 const struct odp_flow_stats *stats)
3855 long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
3856 if (used > facet->used) {
3857 facet->used = used;
3858 if (used > facet->rule->used) {
3859 facet->rule->used = used;
3860 }
3861 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
3865 /* Folds the statistics from 'stats' into the counters in 'facet'.
3867 * Because of the meaning of a facet's counters, it only makes sense to do this
3868 * if 'stats' are not tracked in the datapath, that is, if 'stats' represents a
3869 * packet that was sent by hand or if it represents statistics that have been
3870 * cleared out of the datapath. */
3872 facet_update_stats(struct ofproto *ofproto, struct facet *facet,
3873 const struct odp_flow_stats *stats)
3875 if (stats->n_packets) {
3876 facet_update_time(ofproto, facet, stats);
3877 facet->packet_count += stats->n_packets;
3878 facet->byte_count += stats->n_bytes;
3879 netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
3887 uint16_t idle_timeout;
3888 uint16_t hard_timeout;
3889 uint32_t buffer_id;
3890 uint16_t out_port;
3891 uint16_t flags;
3892 union ofp_action *actions;
3893 size_t n_actions;
3896 /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
3897 * in which no matching flow already exists in the flow table.
3899 * Adds the flow specified by 'ofm', which is followed by 'n_actions'
3900 * ofp_actions, to ofconn->ofproto's flow table. Returns 0 on success or an
3901 * OpenFlow error code as encoded by ofp_mkerr() on failure.
3903 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
3904 * if any. */
3906 add_flow(struct ofconn *ofconn, struct flow_mod *fm)
3908 struct ofproto *p = ofconn->ofproto;
3909 struct ofpbuf *packet;
3910 struct rule *rule;
3911 uint16_t in_port;
3912 int error;
3914 if (fm->flags & OFPFF_CHECK_OVERLAP
3915 && classifier_rule_overlaps(&p->cls, &fm->cr)) {
3916 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
3920 if (fm->buffer_id != UINT32_MAX) {
3921 error = pktbuf_retrieve(ofconn->pktbuf, fm->buffer_id,
3922 &packet, &in_port);
3923 } else {
3924 packet = NULL;
3925 in_port = UINT16_MAX;
3926 }
3928 rule = rule_create(&fm->cr, fm->actions, fm->n_actions,
3929 fm->idle_timeout, fm->hard_timeout, fm->cookie,
3930 fm->flags & OFPFF_SEND_FLOW_REM);
3931 rule_insert(p, rule);
3932 if (packet) {
3933 rule_execute(p, rule, in_port, packet);
3934 }
3938 static struct rule *
3939 find_flow_strict(struct ofproto *p, const struct flow_mod *fm)
3941 return rule_from_cls_rule(classifier_find_rule_exactly(&p->cls, &fm->cr));
3945 send_buffered_packet(struct ofconn *ofconn,
3946 struct rule *rule, uint32_t buffer_id)
3948 struct ofpbuf *packet;
3949 uint16_t in_port;
3950 int error;
3952 if (buffer_id == UINT32_MAX) {
3953 return 0;
3954 }
3956 error = pktbuf_retrieve(ofconn->pktbuf, buffer_id, &packet, &in_port);
3957 if (error) {
3958 return error;
3959 }
3961 rule_execute(ofconn->ofproto, rule, in_port, packet);
3963 return 0;
3966 /* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */
3968 struct modify_flows_cbdata {
3969 struct ofproto *ofproto;
3970 const struct flow_mod *fm;
3971 struct rule *match;
3972 };
3974 static int modify_flow(struct ofproto *, const struct flow_mod *,
3975 struct rule *);
3976 static void modify_flows_cb(struct cls_rule *, void *cbdata_);
3978 /* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code as
3979 * encoded by ofp_mkerr() on failure.
3981 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
3982 * if any. */
3984 modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm)
3986 struct modify_flows_cbdata cbdata;
3988 cbdata.ofproto = ofconn->ofproto;
3989 cbdata.fm = fm;
3990 cbdata.match = NULL;
3992 classifier_for_each_match(&ofconn->ofproto->cls, &fm->cr, CLS_INC_ALL,
3993 modify_flows_cb, &cbdata);
3994 if (cbdata.match) {
3995 /* This credits the packet to whichever flow happened to match last.
3996 * That's weird. Maybe we should do a lookup for the flow that
3997 * actually matches the packet? Who knows. */
3998 send_buffered_packet(ofconn, cbdata.match, fm->buffer_id);
3999 return 0;
4000 } else {
4001 return add_flow(ofconn, fm);
4002 }
4003 }
4005 /* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error
4006 * code as encoded by ofp_mkerr() on failure.
4008 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
4009 * if any. */
4011 modify_flow_strict(struct ofconn *ofconn, struct flow_mod *fm)
4013 struct ofproto *p = ofconn->ofproto;
4014 struct rule *rule = find_flow_strict(p, fm);
4015 if (rule && !rule_is_hidden(rule)) {
4016 modify_flow(p, fm, rule);
4017 return send_buffered_packet(ofconn, rule, fm->buffer_id);
4018 } else {
4019 return add_flow(ofconn, fm);
4020 }
4021 }
4023 /* Callback for modify_flows_loose(). */
4025 modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
4027 struct rule *rule = rule_from_cls_rule(rule_);
4028 struct modify_flows_cbdata *cbdata = cbdata_;
4030 if (!rule_is_hidden(rule)) {
4031 cbdata->match = rule;
4032 modify_flow(cbdata->ofproto, cbdata->fm, rule);
4036 /* Implements core of OFPFC_MODIFY and OFPFC_MODIFY_STRICT where 'rule' has
4037 * been identified as a flow in 'p''s flow table to be modified, by changing
4038 * the rule's actions to match those in 'fm' (which points to 'n_actions'
4039 * ofp_action[] structures). */
4041 modify_flow(struct ofproto *p, const struct flow_mod *fm, struct rule *rule)
4043 size_t actions_len = fm->n_actions * sizeof *rule->actions;
4045 rule->flow_cookie = fm->cookie;
4047 /* If the actions are the same, do nothing. */
4048 if (fm->n_actions == rule->n_actions
4049 && (!fm->n_actions
4050 || !memcmp(fm->actions, rule->actions, actions_len))) {
4051 return 0;
4052 }
4054 /* Replace actions. */
4055 free(rule->actions);
4056 rule->actions = fm->n_actions ? xmemdup(fm->actions, actions_len) : NULL;
4057 rule->n_actions = fm->n_actions;
4059 p->need_revalidate = true;
4064 /* OFPFC_DELETE implementation. */
4066 struct delete_flows_cbdata {
4067 struct ofproto *ofproto;
4068 ovs_be16 out_port;
4069 };
4071 static void delete_flows_cb(struct cls_rule *, void *cbdata_);
4072 static void delete_flow(struct ofproto *, struct rule *, ovs_be16 out_port);
4074 /* Implements OFPFC_DELETE. */
4076 delete_flows_loose(struct ofproto *p, const struct flow_mod *fm)
4078 struct delete_flows_cbdata cbdata;
4080 cbdata.ofproto = p;
4081 cbdata.out_port = htons(fm->out_port);
4083 classifier_for_each_match(&p->cls, &fm->cr, CLS_INC_ALL,
4084 delete_flows_cb, &cbdata);
4087 /* Implements OFPFC_DELETE_STRICT. */
4089 delete_flow_strict(struct ofproto *p, struct flow_mod *fm)
4091 struct rule *rule = find_flow_strict(p, fm);
4092 if (rule) {
4093 delete_flow(p, rule, htons(fm->out_port));
4094 }
4097 /* Callback for delete_flows_loose(). */
4099 delete_flows_cb(struct cls_rule *rule_, void *cbdata_)
4101 struct rule *rule = rule_from_cls_rule(rule_);
4102 struct delete_flows_cbdata *cbdata = cbdata_;
4104 delete_flow(cbdata->ofproto, rule, cbdata->out_port);
4107 /* Implements core of OFPFC_DELETE and OFPFC_DELETE_STRICT where 'rule' has
4108 * been identified as a flow to delete from 'p''s flow table, by deleting the
4109 * flow and sending out an OFPT_FLOW_REMOVED message to any interested
4110 * controller.
4111 *
4112 * Will not delete 'rule' if it is hidden. Will delete 'rule' only if
4113 * 'out_port' is htons(OFPP_NONE) or if 'rule' actually outputs to the
4114 * specified 'out_port'. */
4116 delete_flow(struct ofproto *p, struct rule *rule, ovs_be16 out_port)
4118 if (rule_is_hidden(rule)) {
4119 return;
4120 }
4122 if (out_port != htons(OFPP_NONE) && !rule_has_out_port(rule, out_port)) {
4123 return;
4124 }
4126 rule_send_removed(p, rule, OFPRR_DELETE);
4127 rule_remove(p, rule);
4131 flow_mod_core(struct ofconn *ofconn, struct flow_mod *fm)
4133 struct ofproto *p = ofconn->ofproto;
4136 error = reject_slave_controller(ofconn, "flow_mod");
4141 error = validate_actions(fm->actions, fm->n_actions,
4142 &fm->cr.flow, p->max_ports);
4147 /* We do not support the emergency flow cache. It will hopefully
4148 * get dropped from OpenFlow in the near future. */
4149 if (fm->flags & OFPFF_EMERG) {
4150 /* There isn't a good fit for an error code, so just state that the
4151 * flow table is full. */
4152 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
4155 switch (fm->command) {
4156 case OFPFC_ADD:
4157 return add_flow(ofconn, fm);
4159 case OFPFC_MODIFY:
4160 return modify_flows_loose(ofconn, fm);
4162 case OFPFC_MODIFY_STRICT:
4163 return modify_flow_strict(ofconn, fm);
4165 case OFPFC_DELETE:
4166 delete_flows_loose(p, fm);
4167 return 0;
4169 case OFPFC_DELETE_STRICT:
4170 delete_flow_strict(p, fm);
4171 return 0;
4173 default:
4174 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND);
4179 handle_ofpt_flow_mod(struct ofconn *ofconn, struct ofp_header *oh)
4181 struct ofp_match orig_match;
4182 struct ofp_flow_mod *ofm;
4183 struct ofpbuf b;
4184 struct flow_mod fm;
4185 int error;
4187 b.data = oh;
4188 b.size = ntohs(oh->length);
4190 /* Dissect the message. */
4191 ofm = ofpbuf_try_pull(&b, sizeof *ofm);
4192 if (!ofm) {
4193 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
4194 }
4195 error = ofputil_pull_actions(&b, b.size, &fm.actions, &fm.n_actions);
4200 /* Normalize ofm->match. If normalization actually changes anything, then
4201 * log the differences. */
4202 ofm->match.pad1[0] = ofm->match.pad2[0] = 0;
4203 orig_match = ofm->match;
4204 normalize_match(&ofm->match);
4205 if (memcmp(&ofm->match, &orig_match, sizeof orig_match)) {
4206 static struct vlog_rate_limit normal_rl = VLOG_RATE_LIMIT_INIT(1, 1);
4207 if (!VLOG_DROP_INFO(&normal_rl)) {
4208 char *old = ofp_match_to_literal_string(&orig_match);
4209 char *new = ofp_match_to_literal_string(&ofm->match);
4210 VLOG_INFO("%s: normalization changed ofp_match, details:",
4211 rconn_get_name(ofconn->rconn));
4212 VLOG_INFO(" pre: %s", old);
4213 VLOG_INFO("post: %s", new);
4219 /* Translate the message. */
4220 cls_rule_from_match(&ofm->match, ntohs(ofm->priority), ofconn->flow_format,
4221 ofm->cookie, &fm.cr);
4222 fm.cookie = ofm->cookie;
4223 fm.command = ntohs(ofm->command);
4224 fm.idle_timeout = ntohs(ofm->idle_timeout);
4225 fm.hard_timeout = ntohs(ofm->hard_timeout);
4226 fm.buffer_id = ntohl(ofm->buffer_id);
4227 fm.out_port = ntohs(ofm->out_port);
4228 fm.flags = ntohs(ofm->flags);
4230 /* Execute the command. */
4231 return flow_mod_core(ofconn, &fm);
4235 handle_nxt_flow_mod(struct ofconn *ofconn, struct ofp_header *oh)
4237 struct nx_flow_mod *nfm;
4238 struct ofpbuf b;
4239 struct flow_mod fm;
4240 int error;
4242 b.data = oh;
4243 b.size = ntohs(oh->length);
4245 /* Dissect the message. */
4246 nfm = ofpbuf_try_pull(&b, sizeof *nfm);
4247 if (!nfm) {
4248 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
4249 }
4250 error = nx_pull_match(&b, ntohs(nfm->match_len), ntohs(nfm->priority),
4251 &fm.cr);
4255 error = ofputil_pull_actions(&b, b.size, &fm.actions, &fm.n_actions);
4260 /* Translate the message. */
4261 fm.cookie = nfm->cookie;
4262 fm.command = ntohs(nfm->command);
4263 fm.idle_timeout = ntohs(nfm->idle_timeout);
4264 fm.hard_timeout = ntohs(nfm->hard_timeout);
4265 fm.buffer_id = ntohl(nfm->buffer_id);
4266 fm.out_port = ntohs(nfm->out_port);
4267 fm.flags = ntohs(nfm->flags);
4269 /* Execute the command. */
4270 return flow_mod_core(ofconn, &fm);
4274 handle_tun_id_from_cookie(struct ofconn *ofconn, struct nxt_tun_id_cookie *msg)
4278 error = check_ofp_message(&msg->header, OFPT_VENDOR, sizeof *msg);
4283 ofconn->flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10;
4288 handle_role_request(struct ofconn *ofconn, struct nicira_header *msg)
4290 struct nx_role_request *nrr;
4291 struct nx_role_request *reply;
4292 struct ofpbuf *buf;
4293 uint32_t role;
4295 if (ntohs(msg->header.length) != sizeof *nrr) {
4296 VLOG_WARN_RL(&rl, "received role request of length %u (expected %zu)",
4297 ntohs(msg->header.length), sizeof *nrr);
4298 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
4300 nrr = (struct nx_role_request *) msg;
4302 if (ofconn->type != OFCONN_PRIMARY) {
4303 VLOG_WARN_RL(&rl, "ignoring role request on non-controller "
4305 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
4308 role = ntohl(nrr->role);
4309 if (role != NX_ROLE_OTHER && role != NX_ROLE_MASTER
4310 && role != NX_ROLE_SLAVE) {
4311 VLOG_WARN_RL(&rl, "received request for unknown role %"PRIu32, role);
4313 /* There's no good error code for this. */
4314 return ofp_mkerr(OFPET_BAD_REQUEST, -1);
4317 if (role == NX_ROLE_MASTER) {
4318 struct ofconn *other;
4320 HMAP_FOR_EACH (other, hmap_node, &ofconn->ofproto->controllers) {
4321 if (other->role == NX_ROLE_MASTER) {
4322 other->role = NX_ROLE_SLAVE;
4323 }
4324 }
4325 }
4326 ofconn->role = role;
4328 reply = make_nxmsg_xid(sizeof *reply, NXT_ROLE_REPLY, msg->header.xid,
4329 &buf);
4330 reply->role = htonl(role);
4331 queue_tx(buf, ofconn, ofconn->reply_counter);
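/* Role-change example (illustrative, not from the original source): if
 * controllers A and B are connected and A currently holds NX_ROLE_MASTER,
 * then a request from B for NX_ROLE_MASTER first demotes A to NX_ROLE_SLAVE
 * in the loop above before B's role is recorded, so at most one controller
 * is master at any time. */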
static int
handle_nxt_set_flow_format(struct ofconn *ofconn,
                           struct nxt_set_flow_format *msg)
{
    uint32_t format;
    int error;

    error = check_ofp_message(&msg->header, OFPT_VENDOR, sizeof *msg);
    if (error) {
        return error;
    }

    format = ntohl(msg->format);
    if (format == NXFF_OPENFLOW10
        || format == NXFF_TUN_ID_FROM_COOKIE
        || format == NXFF_NXM) {
        ofconn->flow_format = format;
        return 0;
    } else {
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
    }
}
static int
handle_vendor(struct ofconn *ofconn, void *msg)
{
    struct ofproto *p = ofconn->ofproto;
    struct ofp_vendor_header *ovh = msg;
    struct nicira_header *nh;

    if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
        VLOG_WARN_RL(&rl, "received vendor message of length %u "
                     "(expected at least %zu)",
                     ntohs(ovh->header.length),
                     sizeof(struct ofp_vendor_header));
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
    }
    if (ovh->vendor != htonl(NX_VENDOR_ID)) {
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
    }
    if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
        VLOG_WARN_RL(&rl, "received Nicira vendor message of length %u "
                     "(expected at least %zu)",
                     ntohs(ovh->header.length), sizeof(struct nicira_header));
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
    }

    nh = msg;
    switch (ntohl(nh->subtype)) {
    case NXT_STATUS_REQUEST:
        return switch_status_handle_request(p->switch_status, ofconn->rconn,
                                            msg);

    case NXT_TUN_ID_FROM_COOKIE:
        return handle_tun_id_from_cookie(ofconn, msg);

    case NXT_ROLE_REQUEST:
        return handle_role_request(ofconn, msg);

    case NXT_SET_FLOW_FORMAT:
        return handle_nxt_set_flow_format(ofconn, msg);

    case NXT_FLOW_MOD:
        return handle_nxt_flow_mod(ofconn, &ovh->header);
    }

    return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
}
static int
handle_barrier_request(struct ofconn *ofconn, struct ofp_header *oh)
{
    struct ofp_header *ob;
    struct ofpbuf *buf;

    /* Currently, everything executes synchronously, so we can just
     * immediately send the barrier reply. */
    ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf);
    queue_tx(buf, ofconn, ofconn->reply_counter);
    return 0;
}
static void
handle_openflow(struct ofconn *ofconn, struct ofpbuf *ofp_msg)
{
    struct ofp_header *oh = ofp_msg->data;
    int error;

    COVERAGE_INC(ofproto_recv_openflow);
    switch (oh->type) {
    case OFPT_ECHO_REQUEST:
        error = handle_echo_request(ofconn, oh);
        break;

    case OFPT_ECHO_REPLY:
        error = 0;
        break;

    case OFPT_FEATURES_REQUEST:
        error = handle_features_request(ofconn, oh);
        break;

    case OFPT_GET_CONFIG_REQUEST:
        error = handle_get_config_request(ofconn, oh);
        break;

    case OFPT_SET_CONFIG:
        error = handle_set_config(ofconn, ofp_msg->data);
        break;

    case OFPT_PACKET_OUT:
        error = handle_packet_out(ofconn, ofp_msg->data);
        break;

    case OFPT_PORT_MOD:
        error = handle_port_mod(ofconn, oh);
        break;

    case OFPT_FLOW_MOD:
        error = handle_ofpt_flow_mod(ofconn, ofp_msg->data);
        break;

    case OFPT_STATS_REQUEST:
        error = handle_stats_request(ofconn, oh);
        break;

    case OFPT_VENDOR:
        error = handle_vendor(ofconn, ofp_msg->data);
        break;

    case OFPT_BARRIER_REQUEST:
        error = handle_barrier_request(ofconn, oh);
        break;

    default:
        if (VLOG_IS_WARN_ENABLED()) {
            char *s = ofp_to_string(oh, ntohs(oh->length), 2);
            VLOG_WARN_RL(&rl, "OpenFlow message ignored: %s", s);
            free(s);
        }
        error = ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE);
        break;
    }

    if (error) {
        send_error_oh(ofconn, ofp_msg->data, error);
    }
}
static void
handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet)
{
    struct odp_msg *msg = packet->data;
    struct ofpbuf payload;
    struct facet *facet;
    struct flow flow;

    payload.data = msg + 1;
    payload.size = msg->length - sizeof *msg;
    flow_extract(&payload, msg->arg, msg->port, &flow);

    /* Check with in-band control to see if this packet should be sent
     * to the local port regardless of the flow table. */
    if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
        union odp_action action;

        memset(&action, 0, sizeof(action));
        action.output.type = ODPAT_OUTPUT;
        action.output.port = ODPP_LOCAL;
        dpif_execute(p->dpif, &action, 1, &payload);
    }

    facet = facet_lookup_valid(p, &flow);
    if (!facet) {
        struct rule *rule = rule_lookup(p, &flow);
        if (!rule) {
            /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
            struct ofport *port = get_port(p, msg->port);
            if (port) {
                if (port->opp.config & OFPPC_NO_PACKET_IN) {
                    COVERAGE_INC(ofproto_no_packet_in);
                    /* XXX install 'drop' flow entry */
                    ofpbuf_delete(packet);
                    return;
                }
            } else {
                VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16,
                             msg->port);
            }

            COVERAGE_INC(ofproto_packet_in);
            send_packet_in(p, packet);
            return;
        }

        facet = facet_create(p, rule, &flow, packet);
    } else if (!facet->may_install) {
        /* The facet is not installable, that is, we need to process every
         * packet, so process the current packet's actions into 'facet'. */
        facet_make_actions(p, facet, packet);
    }

    if (facet->rule->cr.priority == FAIL_OPEN_PRIORITY) {
        /*
         * Extra-special case for fail-open mode.
         *
         * We are in fail-open mode and the packet matched the fail-open
         * rule, but we are connected to a controller too.  We should send
         * the packet up to the controller in the hope that it will try to
         * set up a flow and thereby allow us to exit fail-open.
         *
         * See the top-level comment in fail-open.c for more information.
         */
        send_packet_in(p, ofpbuf_clone_with_headroom(packet,
                                                     DPIF_RECV_MSG_PADDING));
    }

    ofpbuf_pull(packet, sizeof *msg);
    facet_execute(p, facet, packet);
    facet_install(p, facet, false);
}
static void
handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
{
    struct odp_msg *msg = packet->data;

    switch (msg->type) {
    case _ODPL_ACTION_NR:
        COVERAGE_INC(ofproto_ctlr_action);
        send_packet_in(p, packet);
        break;

    case _ODPL_SFLOW_NR:
        if (p->sflow) {
            ofproto_sflow_received(p->sflow, msg);
        }
        ofpbuf_delete(packet);
        break;

    case _ODPL_MISS_NR:
        handle_odp_miss_msg(p, packet);
        break;

    default:
        VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
                     msg->type);
        break;
    }
}
/* Flow expiration. */

struct expire_cbdata {
    struct ofproto *ofproto;
    int dp_max_idle;
};

static int ofproto_dp_max_idle(const struct ofproto *);
static void ofproto_update_used(struct ofproto *);
static void rule_expire(struct cls_rule *, void *cbdata);
static void ofproto_expire_facets(struct ofproto *, int dp_max_idle);
/* This function is called periodically by ofproto_run().  Its job is to
 * collect updates for the flows that have been installed into the datapath,
 * most importantly when they last were used, and then use that information
 * to expire flows that have not been used recently.
 *
 * Returns the number of milliseconds after which it should be called again. */
static int
ofproto_expire(struct ofproto *ofproto)
{
    struct expire_cbdata cbdata;

    /* Update 'used' for each flow in the datapath. */
    ofproto_update_used(ofproto);

    /* Expire facets that have been idle too long. */
    cbdata.dp_max_idle = ofproto_dp_max_idle(ofproto);
    ofproto_expire_facets(ofproto, cbdata.dp_max_idle);

    /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
    cbdata.ofproto = ofproto;
    classifier_for_each(&ofproto->cls, CLS_INC_ALL, rule_expire, &cbdata);

    /* Let the hook know that we're at a stable point: all outstanding data
     * in existing flows has been accounted to the account_cb.  Thus, the
     * hook can now reasonably do operations that depend on having accurate
     * flow volume accounting (currently, that's just bond rebalancing). */
    if (ofproto->ofhooks->account_checkpoint_cb) {
        ofproto->ofhooks->account_checkpoint_cb(ofproto->aux);
    }

    return MIN(cbdata.dp_max_idle, 1000);
}
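/* Illustrative timing (figures assumed, not from the original source): even
 * if ofproto_dp_max_idle() reports a 5000 ms cutoff, ofproto_expire()
 * returns MIN(5000, 1000) = 1000, so ofproto_run() re-runs expiration at
 * least once per second and the datapath 'used' times stay reasonably
 * fresh. */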
/* Update 'used' member of installed facets. */
static void
ofproto_update_used(struct ofproto *p)
{
    struct odp_flow *flows;
    size_t n_flows;
    size_t i;
    int error;

    error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
    if (error) {
        return;
    }

    for (i = 0; i < n_flows; i++) {
        struct odp_flow *f = &flows[i];
        struct facet *facet;
        struct flow flow;

        odp_flow_key_to_flow(&f->key, &flow);
        facet = facet_find(p, &flow);

        if (facet && facet->installed) {
            facet_update_time(p, facet, &f->stats);
            facet_account(p, facet, f->stats.n_bytes);
        } else {
            /* There's a flow in the datapath that we know nothing about.
             * Delete it. */
            COVERAGE_INC(ofproto_unexpected_rule);
            dpif_flow_del(p->dpif, f);
        }
    }
    free(flows);
}
/* Calculates and returns the number of milliseconds of idle time after which
 * facets should expire from the datapath and we should fold their statistics
 * into their parent rules in userspace. */
static int
ofproto_dp_max_idle(const struct ofproto *ofproto)
{
    /*
     * Idle time histogram.
     *
     * Most of the time a switch has a relatively small number of facets.
     * When this is the case we might as well keep statistics for all of
     * them in userspace and cache them in the kernel datapath for
     * performance as well.
     *
     * As the number of facets increases, the memory required to maintain
     * statistics about them in userspace and in the kernel becomes
     * significant.  However, with a large number of facets it is likely that
     * only a few of them are "heavy hitters" that consume a large amount of
     * bandwidth.  At this point, only heavy hitters are worth caching in the
     * kernel and maintaining in userspace; other facets we can discard.
     *
     * The technique used to compute the idle time is to build a histogram
     * with N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each
     * facet that is installed in the kernel gets dropped in the appropriate
     * bucket.  After the histogram has been built, we compute the cutoff so
     * that only the most-recently-used 1% of facets (but at least 1000
     * facets) are kept cached.  At least the most-recently-used bucket of
     * facets is kept, so actually an arbitrary number of facets can be kept
     * in any given expiration run (though the next run will delete most of
     * those unless they receive additional data).
     *
     * This requires a second pass through the facets, in addition to the
     * pass made by ofproto_update_used(), because the former function never
     * looks at uninstallable facets.
     */
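    /*
     * Worked example (figures assumed for illustration): with
     * TIME_UPDATE_INTERVAL of 100 ms, BUCKET_WIDTH is 100 ms and N_BUCKETS
     * is 5000 / 100 = 50.  Suppose 50,000 facets are installed, so the
     * cutoff target is MAX(1000, 50000 / 100) = 1000 facets.  A facet idle
     * for 4,321 ms lands in bucket 43.  If the scan below accumulates 1000
     * facets after counting buckets 0 through 6, 'bucket' is left at 7 and
     * the function returns 7 * 100 = 700 ms: facets idle longer than that
     * are expired from the datapath.
     */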
    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
    int buckets[N_BUCKETS] = { 0 };
    struct facet *facet;
    int total, bucket;
    long long int now;
    int i;

    total = hmap_count(&ofproto->facets);
    if (total <= 1000) {
        return N_BUCKETS * BUCKET_WIDTH;
    }

    /* Build histogram. */
    now = time_msec();
    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        long long int idle = now - facet->used;
        int bucket = (idle <= 0 ? 0
                      : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
                      : (unsigned int) idle / BUCKET_WIDTH);
        buckets[bucket]++;
    }

    /* Find the first bucket whose flows should be expired. */
    for (bucket = 0; bucket < N_BUCKETS; bucket++) {
        if (buckets[bucket]) {
            int subtotal = 0;
            do {
                subtotal += buckets[bucket++];
            } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100));
            break;
        }
    }

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds s;

        ds_init(&s);
        ds_put_cstr(&s, "keep");
        for (i = 0; i < N_BUCKETS; i++) {
            if (i == bucket) {
                ds_put_cstr(&s, ", drop");
            }
            if (buckets[i]) {
                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
            }
        }
        VLOG_DBG("%s: %s (msec:count)",
                 dpif_name(ofproto->dpif), ds_cstr(&s));
        ds_destroy(&s);
    }

    return bucket * BUCKET_WIDTH;
}
static void
facet_active_timeout(struct ofproto *ofproto, struct facet *facet)
{
    if (ofproto->netflow && !facet_is_controller_flow(facet) &&
        netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
        struct ofexpired expired;
        struct odp_flow odp_flow;

        /* Get updated flow stats.
         *
         * XXX We could avoid this call entirely if (1) ofproto_update_used()
         * updated TCP flags and (2) the dpif_flow_list_all() in
         * ofproto_update_used() zeroed TCP flags. */
        memset(&odp_flow, 0, sizeof odp_flow);
        if (facet->installed) {
            odp_flow_key_from_flow(&odp_flow.key, &facet->flow);
            odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
            dpif_flow_get(ofproto->dpif, &odp_flow);

            if (odp_flow.stats.n_packets) {
                facet_update_time(ofproto, facet, &odp_flow.stats);
                netflow_flow_update_flags(&facet->nf_flow,
                                          odp_flow.stats.tcp_flags);
            }
        }

        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count +
                               odp_flow.stats.n_packets;
        expired.byte_count = facet->byte_count + odp_flow.stats.n_bytes;
        expired.used = facet->used;

        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }
}
static void
ofproto_expire_facets(struct ofproto *ofproto, int dp_max_idle)
{
    long long int cutoff = time_msec() - dp_max_idle;
    struct facet *facet, *next_facet;

    HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
        facet_active_timeout(ofproto, facet);
        if (facet->used < cutoff) {
            facet_remove(ofproto, facet);
        }
    }
}
/* If 'cls_rule' is an OpenFlow rule that has expired according to OpenFlow
 * rules, then delete it entirely.
 *
 * (This is a callback function for classifier_for_each().) */
static void
rule_expire(struct cls_rule *cls_rule, void *cbdata_)
{
    struct expire_cbdata *cbdata = cbdata_;
    struct rule *rule = rule_from_cls_rule(cls_rule);
    struct facet *facet, *next_facet;
    long long int now;
    uint8_t reason;

    /* Has 'rule' expired? */
    now = time_msec();
    if (rule->hard_timeout
        && now > rule->created + rule->hard_timeout * 1000) {
        reason = OFPRR_HARD_TIMEOUT;
    } else if (rule->idle_timeout && list_is_empty(&rule->facets)
               && now > rule->used + rule->idle_timeout * 1000) {
        reason = OFPRR_IDLE_TIMEOUT;
    } else {
        return;
    }

    COVERAGE_INC(ofproto_expired);

    /* Update stats.  (This is a no-op if the rule expired due to an idle
     * timeout, because that only happens when the rule has no facets left.) */
    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_remove(cbdata->ofproto, facet);
    }

    /* Get rid of the rule. */
    if (!rule_is_hidden(rule)) {
        rule_send_removed(cbdata->ofproto, rule, reason);
    }
    rule_remove(cbdata->ofproto, rule);
}
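/* Timeout arithmetic example (figures assumed): a rule created at
 * t = 1,000 ms with a hard_timeout of 30 s expires once time_msec() exceeds
 * 1000 + 30 * 1000 = 31,000 ms, used or not.  Its idle_timeout is only
 * consulted once the rule has no facets left
 * (list_is_empty(&rule->facets)), since a rule with live facets is still
 * considered in use. */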
static struct ofpbuf *
compose_ofp_flow_removed(struct ofconn *ofconn, const struct rule *rule,
                         uint8_t reason)
{
    struct ofp_flow_removed *ofr;
    struct ofpbuf *buf;

    ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
    flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, ofconn->flow_format,
                  &ofr->match);
    ofr->cookie = rule->flow_cookie;
    ofr->priority = htons(rule->cr.priority);
    ofr->reason = reason;
    calc_flow_duration(rule->created, &ofr->duration_sec, &ofr->duration_nsec);
    ofr->idle_timeout = htons(rule->idle_timeout);
    ofr->packet_count = htonll(rule->packet_count);
    ofr->byte_count = htonll(rule->byte_count);

    return buf;
}
static struct ofpbuf *
compose_nx_flow_removed(const struct rule *rule, uint8_t reason)
{
    struct nx_flow_removed *nfr;
    struct ofpbuf *buf;
    int match_len;

    make_nxmsg(sizeof *nfr, NXT_FLOW_REMOVED, &buf);
    match_len = nx_put_match(buf, &rule->cr);

    /* Fetch 'nfr' only after nx_put_match(), which may expand and thus
     * reallocate 'buf', invalidating any earlier pointer into it. */
    nfr = buf->data;
    nfr->cookie = rule->flow_cookie;
    nfr->priority = htons(rule->cr.priority);
    nfr->reason = reason;
    calc_flow_duration(rule->created, &nfr->duration_sec, &nfr->duration_nsec);
    nfr->idle_timeout = htons(rule->idle_timeout);
    nfr->match_len = htons(match_len);
    nfr->packet_count = htonll(rule->packet_count);
    nfr->byte_count = htonll(rule->byte_count);

    return buf;
}
static void
rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason)
{
    struct ofconn *ofconn;

    if (!rule->send_flow_removed) {
        return;
    }

    LIST_FOR_EACH (ofconn, node, &p->all_conns) {
        struct ofpbuf *msg;

        if (!rconn_is_connected(ofconn->rconn)
            || !ofconn_receives_async_msgs(ofconn)) {
            continue;
        }

        msg = (ofconn->flow_format == NXFF_NXM
               ? compose_nx_flow_removed(rule, reason)
               : compose_ofp_flow_removed(ofconn, rule, reason));

        /* Account flow expirations under ofconn->reply_counter, the counter
         * for replies to OpenFlow requests.  That works because preventing
         * OpenFlow requests from being processed also prevents new flows
         * from being added (and expiring).  (It also prevents processing
         * OpenFlow requests that would not add new flows, so it is
         * imperfect.) */
        queue_tx(msg, ofconn, ofconn->reply_counter);
    }
}
/* pinsched callback for sending 'packet' on 'ofconn'. */
static void
do_send_packet_in(struct ofpbuf *packet, void *ofconn_)
{
    struct ofconn *ofconn = ofconn_;

    rconn_send_with_limit(ofconn->rconn, packet,
                          ofconn->packet_in_counter, 100);
}
/* Takes 'packet', which has been converted with do_convert_to_packet_in(),
 * finalizes its content for sending on 'ofconn', and passes it to 'ofconn''s
 * packet scheduler for sending.
 *
 * 'max_len' specifies the maximum number of bytes of the packet to send on
 * 'ofconn' (INT_MAX specifies no limit).
 *
 * If 'clone' is true, the caller retains ownership of 'packet'.  Otherwise,
 * ownership is transferred to this function. */
static void
schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, int max_len,
                   bool clone)
{
    struct ofproto *ofproto = ofconn->ofproto;
    struct ofp_packet_in *opi = packet->data;
    uint16_t in_port = ofp_port_to_odp_port(ntohs(opi->in_port));
    int send_len, trim_size;
    uint32_t buffer_id;

    /* Get buffer. */
    if (opi->reason == OFPR_ACTION) {
        buffer_id = UINT32_MAX;
    } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) {
        buffer_id = pktbuf_get_null();
    } else if (!ofconn->pktbuf) {
        buffer_id = UINT32_MAX;
    } else {
        struct ofpbuf payload;
        payload.data = opi->data;
        payload.size = packet->size - offsetof(struct ofp_packet_in, data);
        buffer_id = pktbuf_save(ofconn->pktbuf, &payload, in_port);
    }

    /* Figure out how much of the packet to send. */
    send_len = ntohs(opi->total_len);
    if (buffer_id != UINT32_MAX) {
        send_len = MIN(send_len, ofconn->miss_send_len);
    }
    send_len = MIN(send_len, max_len);

    /* Adjust packet length and clone if necessary. */
    trim_size = offsetof(struct ofp_packet_in, data) + send_len;
    if (clone) {
        packet = ofpbuf_clone_data(packet->data, trim_size);
        opi = packet->data;
    } else {
        packet->size = trim_size;
    }

    /* Update packet headers. */
    opi->buffer_id = htonl(buffer_id);
    update_openflow_length(packet);

    /* Hand over to packet scheduler.  It might immediately call into
     * do_send_packet_in() or it might buffer it for a while (until a later
     * call to pinsched_run()). */
    pinsched_send(ofconn->schedulers[opi->reason], in_port,
                  packet, do_send_packet_in, ofconn);
}
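/* Sizing example (figures assumed): for a 1,514-byte frame that was buffered
 * (buffer_id != UINT32_MAX) on a connection with miss_send_len of 128,
 * send_len becomes MIN(1514, 128) = 128 and trim_size is
 * offsetof(struct ofp_packet_in, data) + 128 = 18 + 128 = 146 bytes, so only
 * the first 128 bytes of the frame follow the packet-in header. */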
/* Replaces the struct odp_msg header in 'packet' by an equivalent struct
 * ofp_packet_in.  The odp_msg must have sufficient headroom to do so (e.g.
 * as returned by dpif_recv()).
 *
 * The conversion is not complete: the caller still needs to trim any
 * unneeded payload off the end of the buffer, set the length in the OpenFlow
 * header, and set buffer_id.  Those require us to know the controller
 * settings and so must be done on a per-controller basis.
 *
 * Returns the maximum number of bytes of the packet that should be sent to
 * the controller (INT_MAX if no limit). */
static int
do_convert_to_packet_in(struct ofpbuf *packet)
{
    struct odp_msg *msg = packet->data;
    struct ofp_packet_in *opi;
    uint8_t reason;
    uint16_t total_len;
    uint16_t in_port;
    int max_len;

    /* Extract relevant header fields. */
    if (msg->type == _ODPL_ACTION_NR) {
        reason = OFPR_ACTION;
        max_len = msg->arg;
    } else {
        reason = OFPR_NO_MATCH;
        max_len = INT_MAX;
    }
    total_len = msg->length - sizeof *msg;
    in_port = odp_port_to_ofp_port(msg->port);

    /* Repurpose packet buffer by overwriting header. */
    ofpbuf_pull(packet, sizeof(struct odp_msg));
    opi = ofpbuf_push_zeros(packet, offsetof(struct ofp_packet_in, data));
    opi->header.version = OFP_VERSION;
    opi->header.type = OFPT_PACKET_IN;
    opi->total_len = htons(total_len);
    opi->in_port = htons(in_port);
    opi->reason = reason;

    return max_len;
}
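/* Conversion sketch (illustrative): a datapath message arrives as a struct
 * odp_msg header immediately followed by the frame.  ofpbuf_pull() drops
 * that header and ofpbuf_push_zeros() prepends the fixed-size ofp_packet_in
 * header into the buffer's headroom, so the frame bytes themselves need not
 * be copied. */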
/* Given 'packet' containing an odp_msg of type _ODPL_ACTION_NR or
 * _ODPL_MISS_NR, sends an OFPT_PACKET_IN message to each OpenFlow controller
 * as necessary according to their individual configurations.
 *
 * 'packet' must have sufficient headroom to convert it into a struct
 * ofp_packet_in (e.g. as returned by dpif_recv()).
 *
 * Takes ownership of 'packet'. */
static void
send_packet_in(struct ofproto *ofproto, struct ofpbuf *packet)
{
    struct ofconn *ofconn, *prev;
    int max_len;

    max_len = do_convert_to_packet_in(packet);

    prev = NULL;
    LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
        if (ofconn_receives_async_msgs(ofconn)) {
            if (prev) {
                schedule_packet_in(prev, packet, max_len, true);
            }
            prev = ofconn;
        }
    }
    if (prev) {
        schedule_packet_in(prev, packet, max_len, false);
    } else {
        ofpbuf_delete(packet);
    }
}
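/* Fan-out example (illustrative): with three connected controllers that
 * accept asynchronous messages, the loop above clones 'packet' for the first
 * two (schedule_packet_in(..., true)) and hands the original to the third
 * (schedule_packet_in(..., false)), saving one copy; with no eligible
 * controller, 'packet' is simply freed. */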
static uint64_t
pick_datapath_id(const struct ofproto *ofproto)
{
    const struct ofport *port;

    port = get_port(ofproto, ODPP_LOCAL);
    if (port) {
        uint8_t ea[ETH_ADDR_LEN];
        int error;

        error = netdev_get_etheraddr(port->netdev, ea);
        if (!error) {
            return eth_addr_to_uint64(ea);
        }
        VLOG_WARN("could not get MAC address for %s (%s)",
                  netdev_get_name(port->netdev), strerror(error));
    }
    return ofproto->fallback_dpid;
}
static uint64_t
pick_fallback_dpid(void)
{
    uint8_t ea[ETH_ADDR_LEN];

    eth_addr_nicira_random(ea);
    return eth_addr_to_uint64(ea);
}
static bool
default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet,
                         struct odp_actions *actions, tag_type *tags,
                         uint16_t *nf_output_iface, void *ofproto_)
{
    struct ofproto *ofproto = ofproto_;
    int out_port;

    /* Drop frames for reserved multicast addresses. */
    if (eth_addr_is_reserved(flow->dl_dst)) {
        return true;
    }

    /* Learn source MAC (but don't try to learn from revalidation). */
    if (packet != NULL) {
        tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
                                              0, flow->in_port,
                                              GRAT_ARP_LOCK_NONE);
        if (rev_tag) {
            /* The log messages here could actually be useful in debugging,
             * so keep the rate limit relatively high. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
            VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
                        ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
            ofproto_revalidate(ofproto, rev_tag);
        }
    }

    /* Determine output port. */
    out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags,
                                       NULL);
    if (out_port < 0) {
        flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD,
                      nf_output_iface, actions);
    } else if (out_port != flow->in_port) {
        odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
        *nf_output_iface = out_port;
    } else {
        /* Drop. */
    }

    return true;
}
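/* L2 behavior example (illustrative): a frame from MAC A on port 1 to a
 * not-yet-learned MAC B is flooded (out_port < 0) while A's binding to
 * port 1 is learned; once B is learned on port 2, frames to B go only to
 * port 2, and a frame whose learned destination equals its input port is
 * dropped. */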
static const struct ofhooks default_ofhooks = {
    default_normal_ofhook_cb,