1 /* Copyright (c) 2008, 2009 Nicira Networks
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
20 #include <arpa/inet.h>
24 #include <openflow/openflow.h>
29 #include <sys/socket.h>
30 #include <sys/types.h>
36 #include "dynamic-string.h"
40 #include "mac-learning.h"
43 #include "ofp-print.h"
45 #include "ofproto/netflow.h"
46 #include "ofproto/ofproto.h"
48 #include "poll-loop.h"
49 #include "port-array.h"
50 #include "proc-net-compat.h"
54 #include "socket-util.h"
60 #include "vconn-ssl.h"
61 #include "vswitchd/vswitch-idl.h"
62 #include "xenserver.h"
65 #define THIS_MODULE VLM_bridge
74 /* These members are always valid. */
75 struct port *port; /* Containing port. */
76 size_t port_ifidx; /* Index within containing port. */
77 char *name; /* Host network device name. */
78 tag_type tag; /* Tag associated with this interface. */
79 long long delay_expires; /* Time after which 'enabled' may change. */
81 /* These members are valid only after bridge_reconfigure() causes them to
83 int dp_ifidx; /* Index within kernel datapath. */
84 struct netdev *netdev; /* Network device. */
85 bool enabled; /* May be chosen for flows? */
87 /* This member is only valid *during* bridge_reconfigure(). */
88 const struct ovsrec_interface *cfg;
91 #define BOND_MASK 0xff
93 int iface_idx; /* Index of assigned iface, or -1 if none. */
94 uint64_t tx_bytes; /* Count of bytes recently transmitted. */
95 tag_type iface_tag; /* Tag associated with iface_idx. */
98 #define MAX_MIRRORS 32
99 typedef uint32_t mirror_mask_t;
100 #define MIRROR_MASK_C(X) UINT32_C(X)
101 BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
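/* A note on the representation (a sketch, not taken from the original
 * comments): a mirror_mask_t is treated as a bitmap over the bridge's
 * mirrors, bit 'i' standing for br->mirrors[i], e.g.
 *
 *     mirror_mask_t bit = MIRROR_MASK_C(1) << i;
 *
 * The BUILD_ASSERT_DECL above only checks, at compile time, that the chosen
 * integer type is wide enough to hold MAX_MIRRORS such bits. */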
103 struct bridge *bridge;
107 /* Selection criteria. */
108 struct svec src_ports;
109 struct svec dst_ports;
114 struct port *out_port;
118 #define FLOOD_PORT ((struct port *) 1) /* The 'flood' output port. */
120 struct bridge *bridge;
122 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
123 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1. */
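/* A sketch of how these two fields are meant to be read, assuming the
 * helpers defined later in this file (port_includes_vlan() and
 * port_trunks_vlan()): a port either carries a single implicit VLAN
 * ('vlan' >= 0) or is a trunk ('vlan' == -1) whose admitted VLANs live in
 * the 'trunks' bitmap, so membership reduces to:
 *
 *     bool member = (port->vlan >= 0
 *                    ? vlan == port->vlan
 *                    : bitmap_is_set(port->trunks, vlan));
 */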
126 /* An ordinary bridge port has 1 interface.
127 * A bridge port for bonding has at least 2 interfaces. */
128 struct iface **ifaces;
129 size_t n_ifaces, allocated_ifaces;
132 struct bond_entry *bond_hash; /* An array of (BOND_MASK + 1) elements. */
133 int active_iface; /* Ifidx on which bcasts accepted, or -1. */
134 tag_type active_iface_tag; /* Tag for bcast flows. */
135 tag_type no_ifaces_tag; /* Tag for flows when all ifaces disabled. */
136 int updelay, downdelay; /* Delay before iface goes up/down, in ms. */
137 bool bond_compat_is_stale; /* Need to call port_update_bond_compat()? */
138 bool bond_fake_iface; /* Fake a bond interface for legacy compat? */
140 /* Port mirroring info. */
141 mirror_mask_t src_mirrors; /* Mirrors triggered when packet received. */
142 mirror_mask_t dst_mirrors; /* Mirrors triggered when packet sent. */
143 bool is_mirror_output_port; /* Does port mirroring send frames here? */
145 /* This member is only valid *during* bridge_reconfigure(). */
146 const struct ovsrec_port *cfg;
149 #define DP_MAX_PORTS 255
151 struct list node; /* Node in global list of bridges. */
152 char *name; /* User-specified arbitrary name. */
153 struct mac_learning *ml; /* MAC learning table. */
154 bool sent_config_request; /* Successfully sent config request? */
155 uint8_t default_ea[ETH_ADDR_LEN]; /* Default MAC. */
157 /* Support for remote controllers. */
158 char *controller; /* NULL if there is no remote controller;
159 * "discover" to do controller discovery;
160 * otherwise a vconn name. */
162 /* OpenFlow switch processing. */
163 struct ofproto *ofproto; /* OpenFlow switch. */
165 /* Kernel datapath information. */
166 struct dpif *dpif; /* Datapath. */
167 struct port_array ifaces; /* Indexed by kernel datapath port number. */
171 size_t n_ports, allocated_ports;
174 bool has_bonded_ports;
175 long long int bond_next_rebalance;
180 /* Flow statistics gathering. */
181 time_t next_stats_request;
183 /* Port mirroring. */
184 struct mirror *mirrors[MAX_MIRRORS];
186 /* This member is only valid *during* bridge_reconfigure(). */
187 const struct ovsrec_bridge *cfg;
190 /* List of all bridges. */
191 static struct list all_bridges = LIST_INITIALIZER(&all_bridges);
193 /* Maximum number of datapaths. */
194 enum { DP_MAX = 256 };
196 static struct bridge *bridge_create(const char *name);
197 static void bridge_destroy(struct bridge *);
198 static struct bridge *bridge_lookup(const char *name);
199 static unixctl_cb_func bridge_unixctl_dump_flows;
200 static int bridge_run_one(struct bridge *);
201 static void bridge_reconfigure_one(const struct ovsrec_open_vswitch *,
203 static void bridge_reconfigure_controller(const struct ovsrec_open_vswitch *,
205 static void bridge_get_all_ifaces(const struct bridge *, struct shash *ifaces);
206 static void bridge_fetch_dp_ifaces(struct bridge *);
207 static void bridge_flush(struct bridge *);
208 static void bridge_pick_local_hw_addr(struct bridge *,
209 uint8_t ea[ETH_ADDR_LEN],
210 struct iface **hw_addr_iface);
211 static uint64_t bridge_pick_datapath_id(struct bridge *,
212 const uint8_t bridge_ea[ETH_ADDR_LEN],
213 struct iface *hw_addr_iface);
214 static struct iface *bridge_get_local_iface(struct bridge *);
215 static uint64_t dpid_from_hash(const void *, size_t nbytes);
217 static unixctl_cb_func bridge_unixctl_fdb_show;
219 static void bond_init(void);
220 static void bond_run(struct bridge *);
221 static void bond_wait(struct bridge *);
222 static void bond_rebalance_port(struct port *);
223 static void bond_send_learning_packets(struct port *);
224 static void bond_enable_slave(struct iface *iface, bool enable);
226 static struct port *port_create(struct bridge *, const char *name);
227 static void port_reconfigure(struct port *, const struct ovsrec_port *);
228 static void port_destroy(struct port *);
229 static struct port *port_lookup(const struct bridge *, const char *name);
230 static struct iface *port_lookup_iface(const struct port *, const char *name);
231 static struct port *port_from_dp_ifidx(const struct bridge *,
233 static void port_update_bond_compat(struct port *);
234 static void port_update_vlan_compat(struct port *);
235 static void port_update_bonding(struct port *);
238 static void mirror_create(struct bridge *, const char *name);
239 static void mirror_destroy(struct mirror *);
240 static void mirror_reconfigure(struct bridge *);
241 static void mirror_reconfigure_one(struct mirror *);
242 static bool vlan_is_mirrored(const struct mirror *, int vlan);
244 static bool vlan_is_mirrored(const struct mirror *m UNUSED, int vlan UNUSED)
250 static struct iface *iface_create(struct port *port,
251 const struct ovsrec_interface *if_cfg);
252 static void iface_destroy(struct iface *);
253 static struct iface *iface_lookup(const struct bridge *, const char *name);
254 static struct iface *iface_from_dp_ifidx(const struct bridge *,
256 static bool iface_is_internal(const struct bridge *, const char *name);
257 static void iface_set_mac(struct iface *);
259 /* Hooks into ofproto processing. */
260 static struct ofhooks bridge_ofhooks;
262 /* Public functions. */
264 /* Adds the name of each interface used by a bridge, including local and
265 * internal ports, to 'svec'. */
267 bridge_get_ifaces(struct svec *svec)
269 struct bridge *br, *next;
272 LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
273 for (i = 0; i < br->n_ports; i++) {
274 struct port *port = br->ports[i];
276 for (j = 0; j < port->n_ifaces; j++) {
277 struct iface *iface = port->ifaces[j];
278 if (iface->dp_ifidx < 0) {
279 VLOG_ERR("%s interface not in datapath %s, ignoring",
280 iface->name, dpif_name(br->dpif));
282 if (iface->dp_ifidx != ODPP_LOCAL) {
283 svec_add(svec, iface->name);
292 bridge_init(const struct ovsrec_open_vswitch *cfg)
294 struct svec bridge_names;
295 struct svec dpif_names;
298 unixctl_command_register("fdb/show", bridge_unixctl_fdb_show, NULL);
300 svec_init(&bridge_names);
301 for (i = 0; i < cfg->n_bridges; i++) {
302 svec_add(&bridge_names, cfg->bridges[i]->name);
304 svec_sort(&bridge_names);
306 svec_init(&dpif_names);
307 dp_enumerate(&dpif_names);
308 for (i = 0; i < dpif_names.n; i++) {
309 const char *dpif_name = dpif_names.names[i];
313 retval = dpif_open(dpif_name, &dpif);
315 struct svec all_names;
318 svec_init(&all_names);
319 dpif_get_all_names(dpif, &all_names);
320 for (j = 0; j < all_names.n; j++) {
321 if (svec_contains(&bridge_names, all_names.names[j])) {
327 svec_destroy(&all_names);
331 svec_destroy(&dpif_names);
333 unixctl_command_register("bridge/dump-flows", bridge_unixctl_dump_flows,
337 bridge_reconfigure(cfg);
342 config_string_change(const char *value, char **valuep)
344 if (value && (!*valuep || strcmp(value, *valuep))) {
346 *valuep = xstrdup(value);
354 bridge_configure_ssl(const struct ovsrec_ssl *ssl)
356 /* XXX SSL should be configurable on a per-bridge basis.
357 * XXX should be possible to de-configure SSL. */
358 static char *private_key_file;
359 static char *certificate_file;
360 static char *cacert_file;
364 /* XXX We can't un-set SSL settings. */
368 if (config_string_change(ssl->private_key, &private_key_file)) {
369 vconn_ssl_set_private_key_file(private_key_file);
372 if (config_string_change(ssl->certificate, &certificate_file)) {
373 vconn_ssl_set_certificate_file(certificate_file);
376 /* We assume that even if the filename hasn't changed, if the CA cert
377 * file has been removed, we want to move back into
378 * bootstrapping mode. This opens a small security hole, because
379 * the old certificate will still be trusted until vSwitch is
380 * restarted. We may want to address this in vconn's SSL library. */
381 if (config_string_change(ssl->ca_cert, &cacert_file)
382 || (cacert_file && stat(cacert_file, &s) && errno == ENOENT)) {
383 vconn_ssl_set_ca_cert_file(cacert_file, ssl->bootstrap_ca_cert);
388 /* Attempt to create the network device 'iface_name' through the netdev
391 set_up_iface(const struct ovsrec_interface *iface_cfg, bool create)
393 struct shash_node *node;
394 struct shash options;
398 /* If a type is not explicitly declared, then assume it's an existing
399 * "system" device. */
400 if (iface_cfg->type[0] == '\0' || !strcmp(iface_cfg->type, "system")) {
404 shash_init(&options);
405 for (i = 0; i < iface_cfg->n_options; i++) {
406 shash_add(&options, iface_cfg->key_options[i],
407 xstrdup(iface_cfg->value_options[i]));
411 error = netdev_create(iface_cfg->name, iface_cfg->type, &options);
413 /* xxx Check to make sure that the type hasn't changed. */
414 error = netdev_reconfigure(iface_cfg->name, &options);
417 SHASH_FOR_EACH (node, &options) {
420 shash_destroy(&options);
426 reconfigure_iface(const struct ovsrec_interface *iface_cfg)
428 return set_up_iface(iface_cfg, false);
432 /* iterate_and_prune_ifaces() callback function that opens the network device
433 * for 'iface', if it is not already open, and retrieves the interface's MAC
434 * address and carrier status. */
436 init_iface_netdev(struct bridge *br UNUSED, struct iface *iface,
441 } else if (!netdev_open(iface->name, NETDEV_ETH_TYPE_NONE,
443 netdev_get_carrier(iface->netdev, &iface->enabled);
446 /* If the network device can't be opened, then we're not going to try
447 * to do anything with this interface. */
453 check_iface_dp_ifidx(struct bridge *br, struct iface *iface, void *aux UNUSED)
455 if (iface->dp_ifidx >= 0) {
456 VLOG_DBG("%s has interface %s on port %d",
458 iface->name, iface->dp_ifidx);
461 VLOG_ERR("%s interface not in %s, dropping",
462 iface->name, dpif_name(br->dpif));
468 set_iface_properties(struct bridge *br UNUSED, struct iface *iface,
471 /* Set policing attributes. */
472 netdev_set_policing(iface->netdev,
473 iface->cfg->ingress_policing_rate,
474 iface->cfg->ingress_policing_burst);
476 /* Set MAC address of internal interfaces other than the local
478 if (iface->dp_ifidx != ODPP_LOCAL
479 && iface_is_internal(br, iface->name)) {
480 iface_set_mac(iface);
486 /* Calls 'cb' for each interface in 'br', passing along the 'aux' argument.
487 * Deletes from 'br' all the interfaces for which 'cb' returns false, and then
488 * deletes from 'br' any ports that no longer have any interfaces. */
490 iterate_and_prune_ifaces(struct bridge *br,
491 bool (*cb)(struct bridge *, struct iface *,
497 for (i = 0; i < br->n_ports; ) {
498 struct port *port = br->ports[i];
499 for (j = 0; j < port->n_ifaces; ) {
500 struct iface *iface = port->ifaces[j];
501 if (cb(br, iface, aux)) {
504 iface_destroy(iface);
508 if (port->n_ifaces) {
511 VLOG_ERR("%s port has no interfaces, dropping", port->name);
518 bridge_reconfigure(const struct ovsrec_open_vswitch *ovs_cfg)
520 struct ovsdb_idl_txn *txn;
521 struct shash old_br, new_br;
522 struct shash_node *node;
523 struct bridge *br, *next;
526 COVERAGE_INC(bridge_reconfigure);
528 txn = ovsdb_idl_txn_create(ovs_cfg->header_.table->idl);
530 /* Collect old and new bridges. */
533 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
534 shash_add(&old_br, br->name, br);
536 for (i = 0; i < ovs_cfg->n_bridges; i++) {
537 const struct ovsrec_bridge *br_cfg = ovs_cfg->bridges[i];
538 if (!shash_add_once(&new_br, br_cfg->name, br_cfg)) {
539 VLOG_WARN("more than one bridge named %s", br_cfg->name);
543 /* Get rid of deleted bridges and add new bridges. */
544 LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
545 struct ovsrec_bridge *br_cfg = shash_find_data(&new_br, br->name);
552 SHASH_FOR_EACH (node, &new_br) {
553 const char *br_name = node->name;
554 const struct ovsrec_bridge *br_cfg = node->data;
555 if (!shash_find_data(&old_br, br_name)) {
556 br = bridge_create(br_name);
562 shash_destroy(&old_br);
563 shash_destroy(&new_br);
567 bridge_configure_ssl(ovs_cfg->ssl);
570 /* Reconfigure all bridges. */
571 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
572 bridge_reconfigure_one(ovs_cfg, br);
575 /* Add and delete ports on all datapaths.
577 * The kernel will reject any attempt to add a given port to a datapath if
578 * that port already belongs to a different datapath, so we must do all
579 * port deletions before any port additions. */
580 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
581 struct odp_port *dpif_ports;
583 struct shash want_ifaces;
585 dpif_port_list(br->dpif, &dpif_ports, &n_dpif_ports);
586 bridge_get_all_ifaces(br, &want_ifaces);
587 for (i = 0; i < n_dpif_ports; i++) {
588 const struct odp_port *p = &dpif_ports[i];
589 if (!shash_find(&want_ifaces, p->devname)
590 && strcmp(p->devname, br->name)) {
591 int retval = dpif_port_del(br->dpif, p->port);
593 VLOG_ERR("failed to remove %s interface from %s: %s",
594 p->devname, dpif_name(br->dpif),
599 shash_destroy(&want_ifaces);
602 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
603 struct odp_port *dpif_ports;
605 struct shash cur_ifaces, want_ifaces;
606 struct shash_node *node;
608 /* Get the set of interfaces currently in this datapath. */
609 dpif_port_list(br->dpif, &dpif_ports, &n_dpif_ports);
610 shash_init(&cur_ifaces);
611 for (i = 0; i < n_dpif_ports; i++) {
612 const char *name = dpif_ports[i].devname;
613 if (!shash_find(&cur_ifaces, name)) {
614 shash_add(&cur_ifaces, name, NULL);
619 /* Get the set of interfaces we want on this datapath. */
620 bridge_get_all_ifaces(br, &want_ifaces);
622 SHASH_FOR_EACH (node, &want_ifaces) {
623 const char *if_name = node->name;
624 struct iface *iface = node->data;
626 if (shash_find(&cur_ifaces, if_name)) {
627 /* Already exists, just reconfigure it. */
629 reconfigure_iface(iface->cfg);
632 /* Need to add to datapath. */
636 /* Add to datapath. */
637 internal = iface_is_internal(br, if_name);
638 error = dpif_port_add(br->dpif, if_name,
639 internal ? ODP_PORT_INTERNAL : 0, NULL);
640 if (error == EFBIG) {
641 VLOG_ERR("ran out of valid port numbers on %s",
642 dpif_name(br->dpif));
645 VLOG_ERR("failed to add %s interface to %s: %s",
646 if_name, dpif_name(br->dpif), strerror(error));
650 shash_destroy(&cur_ifaces);
651 shash_destroy(&want_ifaces);
653 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
656 struct iface *local_iface;
657 struct iface *hw_addr_iface;
660 bridge_fetch_dp_ifaces(br);
661 iterate_and_prune_ifaces(br, init_iface_netdev, NULL);
663 iterate_and_prune_ifaces(br, check_iface_dp_ifidx, NULL);
665 /* Pick local port hardware address, datapath ID. */
666 bridge_pick_local_hw_addr(br, ea, &hw_addr_iface);
667 local_iface = bridge_get_local_iface(br);
669 int error = netdev_set_etheraddr(local_iface->netdev, ea);
671 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
672 VLOG_ERR_RL(&rl, "bridge %s: failed to set bridge "
673 "Ethernet address: %s",
674 br->name, strerror(error));
678 dpid = bridge_pick_datapath_id(br, ea, hw_addr_iface);
679 ofproto_set_datapath_id(br->ofproto, dpid);
681 dpid_string = xasprintf("%012"PRIx64, dpid);
682 ovsrec_bridge_set_datapath_id(br->cfg, dpid_string);
685 /* Set NetFlow configuration on this bridge. */
686 if (br->cfg->netflow) {
687 struct ovsrec_netflow *nf_cfg = br->cfg->netflow;
688 struct netflow_options opts;
690 memset(&opts, 0, sizeof opts);
692 dpif_get_netflow_ids(br->dpif, &opts.engine_type, &opts.engine_id);
693 if (nf_cfg->engine_type) {
694 opts.engine_type = nf_cfg->engine_type;
696 if (nf_cfg->engine_id) {
697 opts.engine_id = nf_cfg->engine_id;
700 opts.active_timeout = nf_cfg->active_timeout;
701 if (!opts.active_timeout) {
702 opts.active_timeout = -1;
703 } else if (opts.active_timeout < 0) {
704 VLOG_WARN("bridge %s: active timeout interval set to negative "
705 "value, using default instead (%d seconds)", br->name,
706 NF_ACTIVE_TIMEOUT_DEFAULT);
707 opts.active_timeout = -1;
710 opts.add_id_to_iface = nf_cfg->add_id_to_interface;
711 if (opts.add_id_to_iface) {
712 if (opts.engine_id > 0x7f) {
713 VLOG_WARN("bridge %s: netflow port mangling may conflict "
714 "with another vswitch, choose an engine id less "
715 "than 128", br->name);
717 if (br->n_ports > 508) {
718 VLOG_WARN("bridge %s: netflow port mangling will conflict "
719 "with another port when more than 508 ports are "
724 opts.collectors.n = nf_cfg->n_targets;
725 opts.collectors.names = nf_cfg->targets;
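/* A guess at the arithmetic behind the two warnings above, based only on the
 * 128 and 508 limits they mention: the NetFlow output-interface index is a
 * 16-bit field, and "port mangling" presumably packs a 7-bit engine id into
 * the high bits, leaving the low 9 bits (512 values, a few of them reserved)
 * for the datapath port number:
 *
 *     engine id: 7 bits -> must be < 128
 *     port part: 9 bits -> roughly 508 usable port numbers
 */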
726 if (ofproto_set_netflow(br->ofproto, &opts)) {
727 VLOG_ERR("bridge %s: problem setting netflow collectors",
731 ofproto_set_netflow(br->ofproto, NULL);
734 /* Update the controller and related settings. It would be more
735 * straightforward to call this from bridge_reconfigure_one(), but we
736 * can't do it there for two reasons. First, and most importantly, at
737 * that point we don't know the dp_ifidx of any interfaces that have
738 * been added to the bridge (because we haven't actually added them to
739 * the datapath). Second, at that point we haven't set the datapath ID
740 * yet; when a controller is configured, resetting the datapath ID will
741 * immediately disconnect from the controller, so it's better to set
742 * the datapath ID before the controller. */
743 bridge_reconfigure_controller(ovs_cfg, br);
745 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
746 for (i = 0; i < br->n_ports; i++) {
747 struct port *port = br->ports[i];
749 port_update_vlan_compat(port);
750 port_update_bonding(port);
753 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
754 iterate_and_prune_ifaces(br, set_iface_properties, NULL);
757 ovsdb_idl_txn_commit(txn);
758 ovsdb_idl_txn_destroy(txn); /* XXX */
762 bridge_get_other_config(const struct ovsrec_bridge *br_cfg, const char *key)
766 for (i = 0; i < br_cfg->n_other_config; i++) {
767 if (!strcmp(br_cfg->key_other_config[i], key)) {
768 return br_cfg->value_other_config[i];
775 bridge_pick_local_hw_addr(struct bridge *br, uint8_t ea[ETH_ADDR_LEN],
776 struct iface **hw_addr_iface)
782 *hw_addr_iface = NULL;
784 /* Did the user request a particular MAC? */
785 hwaddr = bridge_get_other_config(br->cfg, "hwaddr");
786 if (hwaddr && eth_addr_from_string(hwaddr, ea)) {
787 if (eth_addr_is_multicast(ea)) {
788 VLOG_ERR("bridge %s: cannot set MAC address to multicast "
789 "address "ETH_ADDR_FMT, br->name, ETH_ADDR_ARGS(ea));
790 } else if (eth_addr_is_zero(ea)) {
791 VLOG_ERR("bridge %s: cannot set MAC address to zero", br->name);
797 /* Otherwise choose the minimum MAC address among all of the interfaces.
798 * (Xen uses FE:FF:FF:FF:FF:FF for virtual interfaces so this will get the
799 * MAC of the physical interface in such an environment.) */
800 memset(ea, 0xff, ETH_ADDR_LEN);
801 for (i = 0; i < br->n_ports; i++) {
802 struct port *port = br->ports[i];
803 uint8_t iface_ea[ETH_ADDR_LEN];
806 /* Mirror output ports don't participate. */
807 if (port->is_mirror_output_port) {
811 /* Choose the MAC address to represent the port. */
812 if (port->cfg->mac && eth_addr_from_string(port->cfg->mac, iface_ea)) {
813 /* Find the interface with this Ethernet address (if any) so that
814 * we can provide the correct devname to the caller. */
816 for (j = 0; j < port->n_ifaces; j++) {
817 struct iface *candidate = port->ifaces[j];
818 uint8_t candidate_ea[ETH_ADDR_LEN];
819 if (!netdev_get_etheraddr(candidate->netdev, candidate_ea)
820 && eth_addr_equals(iface_ea, candidate_ea)) {
825 /* Choose the interface whose MAC address will represent the port.
826 * The Linux kernel bonding code always chooses the MAC address of
827 * the first slave added to a bond, and the Fedora networking
828 * scripts always add slaves to a bond in alphabetical order, so
829 * for compatibility we choose the interface with the name that is
830 * first in alphabetical order. */
831 iface = port->ifaces[0];
832 for (j = 1; j < port->n_ifaces; j++) {
833 struct iface *candidate = port->ifaces[j];
834 if (strcmp(candidate->name, iface->name) < 0) {
839 /* The local port doesn't count (since we're trying to choose its
840 * MAC address anyway). Other internal ports don't count because
841 * we really want a physical MAC if we can get it, and internal
842 * ports typically have randomly generated MACs. */
843 if (iface->dp_ifidx == ODPP_LOCAL
844 || !strcmp(iface->cfg->type, "internal")) {
849 error = netdev_get_etheraddr(iface->netdev, iface_ea);
851 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
852 VLOG_ERR_RL(&rl, "failed to obtain Ethernet address of %s: %s",
853 iface->name, strerror(error));
858 /* Compare against our current choice. */
859 if (!eth_addr_is_multicast(iface_ea) &&
860 !eth_addr_is_reserved(iface_ea) &&
861 !eth_addr_is_zero(iface_ea) &&
862 memcmp(iface_ea, ea, ETH_ADDR_LEN) < 0)
864 memcpy(ea, iface_ea, ETH_ADDR_LEN);
865 *hw_addr_iface = iface;
868 if (eth_addr_is_multicast(ea) || eth_addr_is_vif(ea)) {
869 memcpy(ea, br->default_ea, ETH_ADDR_LEN);
870 *hw_addr_iface = NULL;
871 VLOG_WARN("bridge %s: using default bridge Ethernet "
872 "address "ETH_ADDR_FMT, br->name, ETH_ADDR_ARGS(ea));
874 VLOG_DBG("bridge %s: using bridge Ethernet address "ETH_ADDR_FMT,
875 br->name, ETH_ADDR_ARGS(ea));
879 /* Chooses and returns the datapath ID for bridge 'br' given that the bridge
880 * Ethernet address is 'bridge_ea'. If 'bridge_ea' is the Ethernet address of
881 * an interface on 'br', then that interface must be passed in as
882 * 'hw_addr_iface'; if 'bridge_ea' was derived some other way, then
883 * 'hw_addr_iface' must be passed in as a null pointer. */
885 bridge_pick_datapath_id(struct bridge *br,
886 const uint8_t bridge_ea[ETH_ADDR_LEN],
887 struct iface *hw_addr_iface)
890 * The procedure for choosing a bridge MAC address will, in the most
891 * ordinary case, also choose a unique MAC that we can use as a datapath
892 * ID. In some special cases, though, multiple bridges will end up with
893 * the same MAC address. This is OK for the bridges, but it will confuse
894 * the OpenFlow controller, because each datapath needs a unique datapath
897 * Datapath IDs must be unique. It is also very desirable that they be
898 * stable from one run to the next, so that policy set on a datapath
901 const char *datapath_id;
904 datapath_id = bridge_get_other_config(br->cfg, "datapath-id");
905 if (datapath_id && dpid_from_string(datapath_id, &dpid)) {
911 if (!netdev_get_vlan_vid(hw_addr_iface->netdev, &vlan)) {
913 * A bridge whose MAC address is taken from a VLAN network device
914 * (that is, a network device created with vconfig(8) or similar
915 * tool) will have the same MAC address as a bridge on the VLAN
916 * device's physical network device.
918 * Handle this case by hashing the physical network device MAC
919 * along with the VLAN identifier.
921 uint8_t buf[ETH_ADDR_LEN + 2];
922 memcpy(buf, bridge_ea, ETH_ADDR_LEN);
923 buf[ETH_ADDR_LEN] = vlan >> 8;
924 buf[ETH_ADDR_LEN + 1] = vlan;
925 return dpid_from_hash(buf, sizeof buf);
928 * Assume that this bridge's MAC address is unique, since it
929 * doesn't fit any of the cases we handle specially.
934 * A purely internal bridge, that is, one that has no non-virtual
935 * network devices on it at all, is more difficult because it has no
936 * natural unique identifier at all.
938 * When the host is a XenServer, we handle this case by hashing the
939 * host's UUID with the name of the bridge. Names of bridges are
940 * persistent across XenServer reboots, although they can be reused if
941 * an internal network is destroyed and then a new one is later
942 * created, so this is fairly effective.
944 * When the host is not a XenServer, we punt by using a random MAC
945 * address on each run.
947 const char *host_uuid = xenserver_get_host_uuid();
949 char *combined = xasprintf("%s,%s", host_uuid, br->name);
950 dpid = dpid_from_hash(combined, strlen(combined));
956 return eth_addr_to_uint64(bridge_ea);
960 dpid_from_hash(const void *data, size_t n)
962 uint8_t hash[SHA1_DIGEST_SIZE];
964 BUILD_ASSERT_DECL(sizeof hash >= ETH_ADDR_LEN);
965 sha1_bytes(data, n, hash);
966 eth_addr_mark_random(hash);
967 return eth_addr_to_uint64(hash);
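/* The call to eth_addr_mark_random() above is assumed (not verified here) to
 * clear the multicast bit and set the locally-administered bit of the
 * truncated SHA-1 digest, so the resulting datapath ID looks like a valid
 * unicast MAC, roughly:
 *
 *     hash[0] &= ~1;    // unicast
 *     hash[0] |= 2;     // locally administered
 */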
973 struct bridge *br, *next;
977 LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
978 int error = bridge_run_one(br);
980 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
981 VLOG_ERR_RL(&rl, "bridge %s: datapath was destroyed externally, "
982 "forcing reconfiguration", br->name);
996 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
997 ofproto_wait(br->ofproto);
998 if (br->controller) {
1002 mac_learning_wait(br->ml);
1007 /* Forces 'br' to revalidate all of its flows. This is appropriate when 'br''s
1008 * configuration changes. */
1010 bridge_flush(struct bridge *br)
1012 COVERAGE_INC(bridge_flush);
1014 mac_learning_flush(br->ml);
1017 /* Returns the 'br' interface for the ODPP_LOCAL port, or null if 'br' has no
1018 * such interface. */
1019 static struct iface *
1020 bridge_get_local_iface(struct bridge *br)
1024 for (i = 0; i < br->n_ports; i++) {
1025 struct port *port = br->ports[i];
1026 for (j = 0; j < port->n_ifaces; j++) {
1027 struct iface *iface = port->ifaces[j];
1028 if (iface->dp_ifidx == ODPP_LOCAL) {
1037 /* Bridge unixctl user interface functions. */
1039 bridge_unixctl_fdb_show(struct unixctl_conn *conn,
1040 const char *args, void *aux UNUSED)
1042 struct ds ds = DS_EMPTY_INITIALIZER;
1043 const struct bridge *br;
1044 const struct mac_entry *e;
1046 br = bridge_lookup(args);
1048 unixctl_command_reply(conn, 501, "no such bridge");
1052 ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
1053 LIST_FOR_EACH (e, struct mac_entry, lru_node, &br->ml->lrus) {
1054 if (e->port < 0 || e->port >= br->n_ports) {
1057 ds_put_format(&ds, "%5d  %4d  "ETH_ADDR_FMT"  %3d\n",
1058 br->ports[e->port]->ifaces[0]->dp_ifidx,
1059 e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(e));
1061 unixctl_command_reply(conn, 200, ds_cstr(&ds));
1065 /* Bridge reconfiguration functions. */
1067 static struct bridge *
1068 bridge_create(const char *name)
1073 assert(!bridge_lookup(name));
1074 br = xzalloc(sizeof *br);
1076 error = dpif_create_and_open(name, &br->dpif);
1081 dpif_flow_flush(br->dpif);
1083 error = ofproto_create(name, &bridge_ofhooks, br, &br->ofproto);
1085 VLOG_ERR("failed to create switch %s: %s", name, strerror(error));
1086 dpif_delete(br->dpif);
1087 dpif_close(br->dpif);
1092 br->name = xstrdup(name);
1093 br->ml = mac_learning_create();
1094 br->sent_config_request = false;
1095 eth_addr_random(br->default_ea);
1097 port_array_init(&br->ifaces);
1100 br->bond_next_rebalance = time_msec() + 10000;
1102 list_push_back(&all_bridges, &br->node);
1104 VLOG_INFO("created bridge %s on %s", br->name, dpif_name(br->dpif));
1110 bridge_destroy(struct bridge *br)
1115 while (br->n_ports > 0) {
1116 port_destroy(br->ports[br->n_ports - 1]);
1118 list_remove(&br->node);
1119 error = dpif_delete(br->dpif);
1120 if (error && error != ENOENT) {
1121 VLOG_ERR("failed to delete %s: %s",
1122 dpif_name(br->dpif), strerror(error));
1124 dpif_close(br->dpif);
1125 ofproto_destroy(br->ofproto);
1126 free(br->controller);
1127 mac_learning_destroy(br->ml);
1128 port_array_destroy(&br->ifaces);
1135 static struct bridge *
1136 bridge_lookup(const char *name)
1140 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
1141 if (!strcmp(br->name, name)) {
1149 bridge_exists(const char *name)
1151 return bridge_lookup(name) ? true : false;
1155 bridge_get_datapathid(const char *name)
1157 struct bridge *br = bridge_lookup(name);
1158 return br ? ofproto_get_datapath_id(br->ofproto) : 0;
1161 /* Handle requests for a listing of all flows known by the OpenFlow
1162 * stack, including those normally hidden. */
1164 bridge_unixctl_dump_flows(struct unixctl_conn *conn,
1165 const char *args, void *aux UNUSED)
1170 br = bridge_lookup(args);
1172 unixctl_command_reply(conn, 501, "Unknown bridge");
1177 ofproto_get_all_flows(br->ofproto, &results);
1179 unixctl_command_reply(conn, 200, ds_cstr(&results));
1180 ds_destroy(&results);
1184 bridge_run_one(struct bridge *br)
1188 error = ofproto_run1(br->ofproto);
1193 mac_learning_run(br->ml, ofproto_get_revalidate_set(br->ofproto));
1196 error = ofproto_run2(br->ofproto, br->flush);
1202 static const struct ovsrec_controller *
1203 bridge_get_controller(const struct ovsrec_open_vswitch *ovs_cfg,
1204 const struct bridge *br)
1206 const struct ovsrec_controller *controller;
1208 controller = (br->cfg->controller ? br->cfg->controller
1209 : ovs_cfg->controller ? ovs_cfg->controller
1212 if (controller && !strcmp(controller->target, "none")) {
1220 check_duplicate_ifaces(struct bridge *br, struct iface *iface, void *ifaces_)
1222 struct svec *ifaces = ifaces_;
1223 if (!svec_contains(ifaces, iface->name)) {
1224 svec_add(ifaces, iface->name);
1228 VLOG_ERR("bridge %s: %s interface is on multiple ports, "
1230 br->name, iface->name, iface->port->name);
1236 bridge_reconfigure_one(const struct ovsrec_open_vswitch *ovs_cfg,
1239 struct shash old_ports, new_ports;
1241 struct svec listeners, old_listeners;
1242 struct svec snoops, old_snoops;
1243 struct shash_node *node;
1247 /* Collect old ports. */
1248 shash_init(&old_ports);
1249 for (i = 0; i < br->n_ports; i++) {
1250 shash_add(&old_ports, br->ports[i]->name, br->ports[i]);
1253 /* Collect new ports. */
1254 shash_init(&new_ports);
1255 for (i = 0; i < br->cfg->n_ports; i++) {
1256 const char *name = br->cfg->ports[i]->name;
1257 if (!shash_add_once(&new_ports, name, br->cfg->ports[i])) {
1258 VLOG_WARN("bridge %s: %s specified twice as bridge port",
1263 /* If we have a controller, then we need a local port. Complain if the
1264 * user didn't specify one.
1266 * XXX perhaps we should synthesize a port ourselves in this case. */
1267 if (bridge_get_controller(ovs_cfg, br)) {
1268 char local_name[IF_NAMESIZE];
1271 error = dpif_port_get_name(br->dpif, ODPP_LOCAL,
1272 local_name, sizeof local_name);
1273 if (!error && !shash_find(&new_ports, local_name)) {
1274 VLOG_WARN("bridge %s: controller specified but no local port "
1275 "(port named %s) defined",
1276 br->name, local_name);
1280 dpid_from_string(ovs_cfg->management_id, &mgmt_id);
1281 ofproto_set_mgmt_id(br->ofproto, mgmt_id);
1283 /* Get rid of deleted ports and add new ports. */
1284 SHASH_FOR_EACH (node, &old_ports) {
1285 if (!shash_find(&new_ports, node->name)) {
1286 port_destroy(node->data);
1289 SHASH_FOR_EACH (node, &new_ports) {
1290 struct port *port = shash_find_data(&old_ports, node->name);
1292 port = port_create(br, node->name);
1294 port_reconfigure(port, node->data);
1296 shash_destroy(&old_ports);
1297 shash_destroy(&new_ports);
1299 /* Check and delete duplicate interfaces. */
1301 iterate_and_prune_ifaces(br, check_duplicate_ifaces, &ifaces);
1302 svec_destroy(&ifaces);
1304 /* Delete all flows if we're switching from connected to standalone or vice
1305 * versa. (XXX Should we delete all flows if we are switching from one
1306 * controller to another?) */
1309 /* Configure OpenFlow management listeners. */
1310 svec_init(&listeners);
1311 cfg_get_all_strings(&listeners, "bridge.%s.openflow.listeners", br->name);
1313 svec_add_nocopy(&listeners, xasprintf("punix:%s/%s.mgmt",
1314 ovs_rundir, br->name));
1315 } else if (listeners.n == 1 && !strcmp(listeners.names[0], "none")) {
1316 svec_clear(&listeners);
1318 svec_sort_unique(&listeners);
1320 svec_init(&old_listeners);
1321 ofproto_get_listeners(br->ofproto, &old_listeners);
1322 svec_sort_unique(&old_listeners);
1324 if (!svec_equal(&listeners, &old_listeners)) {
1325 ofproto_set_listeners(br->ofproto, &listeners);
1327 svec_destroy(&listeners);
1328 svec_destroy(&old_listeners);
1330 /* Configure OpenFlow controller connection snooping. */
1332 cfg_get_all_strings(&snoops, "bridge.%s.openflow.snoops", br->name);
1334 svec_add_nocopy(&snoops, xasprintf("punix:%s/%s.snoop",
1335 ovs_rundir, br->name));
1336 } else if (snoops.n == 1 && !strcmp(snoops.names[0], "none")) {
1337 svec_clear(&snoops);
1339 svec_sort_unique(&snoops);
1341 svec_init(&old_snoops);
1342 ofproto_get_snoops(br->ofproto, &old_snoops);
1343 svec_sort_unique(&old_snoops);
1345 if (!svec_equal(&snoops, &old_snoops)) {
1346 ofproto_set_snoops(br->ofproto, &snoops);
1348 svec_destroy(&snoops);
1349 svec_destroy(&old_snoops);
1351 /* Default listener. */
1352 svec_init(&listeners);
1353 svec_add_nocopy(&listeners, xasprintf("punix:%s/%s.mgmt",
1354 ovs_rundir, br->name));
1355 svec_init(&old_listeners);
1356 ofproto_get_listeners(br->ofproto, &old_listeners);
1357 if (!svec_equal(&listeners, &old_listeners)) {
1358 ofproto_set_listeners(br->ofproto, &listeners);
1360 svec_destroy(&listeners);
1361 svec_destroy(&old_listeners);
1363 /* Default snoop. */
1365 svec_add_nocopy(&snoops, xasprintf("punix:%s/%s.snoop",
1366 ovs_rundir, br->name));
1367 svec_init(&old_snoops);
1368 ofproto_get_snoops(br->ofproto, &old_snoops);
1369 if (!svec_equal(&snoops, &old_snoops)) {
1370 ofproto_set_snoops(br->ofproto, &snoops);
1372 svec_destroy(&snoops);
1373 svec_destroy(&old_snoops);
1377 mirror_reconfigure(br);
1382 bridge_reconfigure_controller(const struct ovsrec_open_vswitch *ovs_cfg,
1385 char *pfx = xasprintf("bridge.%s.controller", br->name);
1386 const struct ovsrec_controller *c;
1388 c = bridge_get_controller(ovs_cfg, br);
1389 if ((br->controller != NULL) != (c != NULL)) {
1390 ofproto_flush_flows(br->ofproto);
1392 free(br->controller);
1393 br->controller = c ? xstrdup(c->target) : NULL;
1396 int max_backoff, probe;
1397 int rate_limit, burst_limit;
1399 if (!strcmp(c->target, "discover")) {
1400 ofproto_set_discovery(br->ofproto, true,
1401 c->discover_accept_regex,
1402 c->discover_update_resolv_conf);
1404 struct iface *local_iface;
1408 in_band = (!c->connection_mode
1409 || !strcmp(c->connection_mode, "out-of-band"));
1410 ofproto_set_discovery(br->ofproto, false, NULL, NULL);
1411 ofproto_set_in_band(br->ofproto, in_band);
1413 local_iface = bridge_get_local_iface(br);
1414 if (local_iface && c->local_ip && inet_aton(c->local_ip, &ip)) {
1415 struct netdev *netdev = local_iface->netdev;
1416 struct in_addr ip, mask, gateway;
1418 if (!c->local_netmask || !inet_aton(c->local_netmask, &mask)) {
1421 if (!c->local_gateway
1422 || !inet_aton(c->local_gateway, &gateway)) {
1426 netdev_turn_flags_on(netdev, NETDEV_UP, true);
1428 mask.s_addr = guess_netmask(ip.s_addr);
1430 if (!netdev_set_in4(netdev, ip, mask)) {
1431 VLOG_INFO("bridge %s: configured IP address "IP_FMT", "
1433 br->name, IP_ARGS(&ip.s_addr),
1434 IP_ARGS(&mask.s_addr));
1437 if (gateway.s_addr) {
1438 if (!netdev_add_router(netdev, gateway)) {
1439 VLOG_INFO("bridge %s: configured gateway "IP_FMT,
1440 br->name, IP_ARGS(&gateway.s_addr));
1446 ofproto_set_failure(br->ofproto,
1448 || !strcmp(c->fail_mode, "standalone")
1449 || !strcmp(c->fail_mode, "open")));
1451 probe = c->inactivity_probe ? *c->inactivity_probe / 1000 : 5;
1452 ofproto_set_probe_interval(br->ofproto, probe);
1454 max_backoff = c->max_backoff ? *c->max_backoff / 1000 : 8;
1455 ofproto_set_max_backoff(br->ofproto, max_backoff);
1457 rate_limit = c->controller_rate_limit ? *c->controller_rate_limit : 0;
1458 burst_limit = c->controller_burst_limit ? *c->controller_burst_limit : 0;
1459 ofproto_set_rate_limit(br->ofproto, rate_limit, burst_limit);
1461 ofproto_set_remote_execution(br->ofproto, NULL, NULL); /* XXX */
1463 union ofp_action action;
1466 /* Set up a flow that matches every packet and directs them to
1467 * OFPP_NORMAL (which goes to us). */
1468 memset(&action, 0, sizeof action);
1469 action.type = htons(OFPAT_OUTPUT);
1470 action.output.len = htons(sizeof action);
1471 action.output.port = htons(OFPP_NORMAL);
1472 memset(&flow, 0, sizeof flow);
1473 ofproto_add_flow(br->ofproto, &flow, OFPFW_ALL, 0,
1476 ofproto_set_in_band(br->ofproto, false);
1477 ofproto_set_max_backoff(br->ofproto, 1);
1478 ofproto_set_probe_interval(br->ofproto, 5);
1479 ofproto_set_failure(br->ofproto, false);
1483 ofproto_set_controller(br->ofproto, br->controller);
1487 bridge_get_all_ifaces(const struct bridge *br, struct shash *ifaces)
1492 for (i = 0; i < br->n_ports; i++) {
1493 struct port *port = br->ports[i];
1494 for (j = 0; j < port->n_ifaces; j++) {
1495 struct iface *iface = port->ifaces[j];
1496 shash_add_once(ifaces, iface->name, iface);
1498 if (port->n_ifaces > 1 && port->cfg->bond_fake_iface) {
1499 shash_add_once(ifaces, port->name, NULL);
1504 /* For robustness, in case the administrator moves around datapath ports behind
1505 * our back, we re-check all the datapath port numbers here.
1507 * This function will set the 'dp_ifidx' members of interfaces that have
1508 * disappeared to -1, so only call this function from a context where those
1509 * 'struct iface's will be removed from the bridge. Otherwise, the -1
1510 * 'dp_ifidx'es will cause trouble later when we try to send them to the
1511 * datapath, which doesn't support UINT16_MAX+1 ports. */
1513 bridge_fetch_dp_ifaces(struct bridge *br)
1515 struct odp_port *dpif_ports;
1516 size_t n_dpif_ports;
1519 /* Reset all interface numbers. */
1520 for (i = 0; i < br->n_ports; i++) {
1521 struct port *port = br->ports[i];
1522 for (j = 0; j < port->n_ifaces; j++) {
1523 struct iface *iface = port->ifaces[j];
1524 iface->dp_ifidx = -1;
1527 port_array_clear(&br->ifaces);
1529 dpif_port_list(br->dpif, &dpif_ports, &n_dpif_ports);
1530 for (i = 0; i < n_dpif_ports; i++) {
1531 struct odp_port *p = &dpif_ports[i];
1532 struct iface *iface = iface_lookup(br, p->devname);
1534 if (iface->dp_ifidx >= 0) {
1535 VLOG_WARN("%s reported interface %s twice",
1536 dpif_name(br->dpif), p->devname);
1537 } else if (iface_from_dp_ifidx(br, p->port)) {
1538 VLOG_WARN("%s reported interface %"PRIu16" twice",
1539 dpif_name(br->dpif), p->port);
1541 port_array_set(&br->ifaces, p->port, iface);
1542 iface->dp_ifidx = p->port;
1546 int64_t ofport = (iface->dp_ifidx >= 0
1547 ? odp_port_to_ofp_port(iface->dp_ifidx)
1549 ovsrec_interface_set_ofport(iface->cfg, &ofport, 1);
1556 /* Bridge packet processing functions. */
1559 bond_hash(const uint8_t mac[ETH_ADDR_LEN])
1561 return hash_bytes(mac, ETH_ADDR_LEN, 0) & BOND_MASK;
1564 static struct bond_entry *
1565 lookup_bond_entry(const struct port *port, const uint8_t mac[ETH_ADDR_LEN])
1567 return &port->bond_hash[bond_hash(mac)];
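/* A usage sketch (the MAC value is made up for illustration): each source
 * MAC hashes into one of BOND_MASK + 1 = 256 bond_entry buckets, and the
 * bucket both pins that MAC's traffic to a single slave and accumulates
 * tx_bytes for the rebalancing code:
 *
 *     const uint8_t mac[ETH_ADDR_LEN] = { 0x00, 0x16, 0x3e, 0x01, 0x02, 0x03 };
 *     struct bond_entry *e = lookup_bond_entry(port, mac);
 *     // e->iface_idx picks the slave; e->tx_bytes feeds bond_rebalance_port().
 */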
1571 bond_choose_iface(const struct port *port)
1573 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1574 size_t i, best_down_slave = -1;
1575 long long next_delay_expiration = LLONG_MAX;
1577 for (i = 0; i < port->n_ifaces; i++) {
1578 struct iface *iface = port->ifaces[i];
1580 if (iface->enabled) {
1582 } else if (iface->delay_expires < next_delay_expiration) {
1583 best_down_slave = i;
1584 next_delay_expiration = iface->delay_expires;
1588 if (best_down_slave != -1) {
1589 struct iface *iface = port->ifaces[best_down_slave];
1591 VLOG_INFO_RL(&rl, "interface %s: skipping remaining %lli ms updelay "
1592 "since no other interface is up", iface->name,
1593 iface->delay_expires - time_msec());
1594 bond_enable_slave(iface, true);
1597 return best_down_slave;
1601 choose_output_iface(const struct port *port, const uint8_t *dl_src,
1602 uint16_t *dp_ifidx, tag_type *tags)
1604 struct iface *iface;
1606 assert(port->n_ifaces);
1607 if (port->n_ifaces == 1) {
1608 iface = port->ifaces[0];
1610 struct bond_entry *e = lookup_bond_entry(port, dl_src);
1611 if (e->iface_idx < 0 || e->iface_idx >= port->n_ifaces
1612 || !port->ifaces[e->iface_idx]->enabled) {
1613 /* XXX select interface properly. The current interface selection
1614 * is only good for testing the rebalancing code. */
1615 e->iface_idx = bond_choose_iface(port);
1616 if (e->iface_idx < 0) {
1617 *tags |= port->no_ifaces_tag;
1620 e->iface_tag = tag_create_random();
1621 ((struct port *) port)->bond_compat_is_stale = true;
1623 *tags |= e->iface_tag;
1624 iface = port->ifaces[e->iface_idx];
1626 *dp_ifidx = iface->dp_ifidx;
1627 *tags |= iface->tag; /* Currently only used for bonding. */
1632 bond_link_status_update(struct iface *iface, bool carrier)
1634 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1635 struct port *port = iface->port;
1637 if ((carrier == iface->enabled) == (iface->delay_expires == LLONG_MAX)) {
1638 /* Nothing to do. */
1641 VLOG_INFO_RL(&rl, "interface %s: carrier %s",
1642 iface->name, carrier ? "detected" : "dropped");
1643 if (carrier == iface->enabled) {
1644 iface->delay_expires = LLONG_MAX;
1645 VLOG_INFO_RL(&rl, "interface %s: will not be %s",
1646 iface->name, carrier ? "disabled" : "enabled");
1647 } else if (carrier && port->active_iface < 0) {
1648 bond_enable_slave(iface, true);
1649 if (port->updelay) {
1650 VLOG_INFO_RL(&rl, "interface %s: skipping %d ms updelay since no "
1651 "other interface is up", iface->name, port->updelay);
1654 int delay = carrier ? port->updelay : port->downdelay;
1655 iface->delay_expires = time_msec() + delay;
1658 "interface %s: will be %s if it stays %s for %d ms",
1660 carrier ? "enabled" : "disabled",
1661 carrier ? "up" : "down",
1668 bond_choose_active_iface(struct port *port)
1670 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1672 port->active_iface = bond_choose_iface(port);
1673 port->active_iface_tag = tag_create_random();
1674 if (port->active_iface >= 0) {
1675 VLOG_INFO_RL(&rl, "port %s: active interface is now %s",
1676 port->name, port->ifaces[port->active_iface]->name);
1678 VLOG_WARN_RL(&rl, "port %s: all ports disabled, no active interface",
1684 bond_enable_slave(struct iface *iface, bool enable)
1686 struct port *port = iface->port;
1687 struct bridge *br = port->bridge;
1689 /* This acts as a recursion check. If the act of disabling a slave
1690 * causes a different slave to be enabled, the flag will allow us to
1691 * skip redundant work when we reenter this function. It must be
1692 * cleared on exit to keep things safe with multiple bonds. */
1693 static bool moving_active_iface = false;
1695 iface->delay_expires = LLONG_MAX;
1696 if (enable == iface->enabled) {
1700 iface->enabled = enable;
1701 if (!iface->enabled) {
1702 VLOG_WARN("interface %s: disabled", iface->name);
1703 ofproto_revalidate(br->ofproto, iface->tag);
1704 if (iface->port_ifidx == port->active_iface) {
1705 ofproto_revalidate(br->ofproto,
1706 port->active_iface_tag);
1708 /* Disabling a slave can lead to another slave being immediately
1709 * enabled if there will be no active slaves but one is waiting
1710 * on an updelay. In this case we do not need to run most of the
1711 * code for the newly enabled slave since there was no period
1712 * without an active slave and it is redundant with the disabling
1714 moving_active_iface = true;
1715 bond_choose_active_iface(port);
1717 bond_send_learning_packets(port);
1719 VLOG_WARN("interface %s: enabled", iface->name);
1720 if (port->active_iface < 0 && !moving_active_iface) {
1721 ofproto_revalidate(br->ofproto, port->no_ifaces_tag);
1722 bond_choose_active_iface(port);
1723 bond_send_learning_packets(port);
1725 iface->tag = tag_create_random();
1728 moving_active_iface = false;
1729 port->bond_compat_is_stale = true;
1733 bond_run(struct bridge *br)
1737 for (i = 0; i < br->n_ports; i++) {
1738 struct port *port = br->ports[i];
1740 if (port->n_ifaces >= 2) {
1741 for (j = 0; j < port->n_ifaces; j++) {
1742 struct iface *iface = port->ifaces[j];
1743 if (time_msec() >= iface->delay_expires) {
1744 bond_enable_slave(iface, !iface->enabled);
1749 if (port->bond_compat_is_stale) {
1750 port->bond_compat_is_stale = false;
1751 port_update_bond_compat(port);
1757 bond_wait(struct bridge *br)
1761 for (i = 0; i < br->n_ports; i++) {
1762 struct port *port = br->ports[i];
1763 if (port->n_ifaces < 2) {
1766 for (j = 0; j < port->n_ifaces; j++) {
1767 struct iface *iface = port->ifaces[j];
1768 if (iface->delay_expires != LLONG_MAX) {
1769 poll_timer_wait(iface->delay_expires - time_msec());
1776 set_dst(struct dst *p, const flow_t *flow,
1777 const struct port *in_port, const struct port *out_port,
1780 p->vlan = (out_port->vlan >= 0 ? OFP_VLAN_NONE
1781 : in_port->vlan >= 0 ? in_port->vlan
1782 : ntohs(flow->dl_vlan));
1783 return choose_output_iface(out_port, flow->dl_src, &p->dp_ifidx, tags);
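/* The ternary above, restated case by case (no additional behavior):
 *
 *     if (out_port->vlan >= 0)        // implicitly tagged output port:
 *         p->vlan = OFP_VLAN_NONE;    //   emit the frame untagged
 *     else if (in_port->vlan >= 0)    // frame arrived on an implicitly
 *         p->vlan = in_port->vlan;    //   tagged port: tag it on the trunk
 *     else
 *         p->vlan = ntohs(flow->dl_vlan);   // otherwise keep the 802.1Q tag
 */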
1787 swap_dst(struct dst *p, struct dst *q)
1789 struct dst tmp = *p;
1794 /* Moves all the dsts with vlan == 'vlan' to the front of the 'n_dsts' in
1795 * 'dsts'. (This may help performance by reducing the number of VLAN changes
1796 * that we push to the datapath. We could in fact fully sort the array by
1797 * vlan, but in most cases there are at most two different vlan tags so that's
1798 * possibly overkill.) */
1800 partition_dsts(struct dst *dsts, size_t n_dsts, int vlan)
1802 struct dst *first = dsts;
1803 struct dst *last = dsts + n_dsts;
1805 while (first != last) {
1807 * - All dsts < first have vlan == 'vlan'.
1808 * - All dsts >= last have vlan != 'vlan'.
1809 * - first < last. */
1810 while (first->vlan == vlan) {
1811 if (++first == last) {
1816 /* Same invariants, plus one additional:
1817 * - first->vlan != vlan.
1819 while (last[-1].vlan != vlan) {
1820 if (--last == first) {
1825 /* Same invariants, plus one additional:
1826 * - last[-1].vlan == vlan.*/
1827 swap_dst(first++, --last);
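/* A worked example: partitioning the vlan sequence [10, 0, 10, 0, 10] around
 * vlan == 10 yields something like [10, 10, 10, 0, 0]; only membership in
 * the two groups is guaranteed, not the order within each group. */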
1832 mirror_mask_ffs(mirror_mask_t mask)
1834 BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
1839 dst_is_duplicate(const struct dst *dsts, size_t n_dsts,
1840 const struct dst *test)
1843 for (i = 0; i < n_dsts; i++) {
1844 if (dsts[i].vlan == test->vlan && dsts[i].dp_ifidx == test->dp_ifidx) {
1852 port_trunks_vlan(const struct port *port, uint16_t vlan)
1854 return port->vlan < 0 && bitmap_is_set(port->trunks, vlan);
1858 port_includes_vlan(const struct port *port, uint16_t vlan)
1860 return vlan == port->vlan || port_trunks_vlan(port, vlan);
1864 compose_dsts(const struct bridge *br, const flow_t *flow, uint16_t vlan,
1865 const struct port *in_port, const struct port *out_port,
1866 struct dst dsts[], tag_type *tags, uint16_t *nf_output_iface)
1868 mirror_mask_t mirrors = in_port->src_mirrors;
1869 struct dst *dst = dsts;
1872 if (out_port == FLOOD_PORT) {
1873 /* XXX use ODP_FLOOD if no vlans or bonding. */
1874 /* XXX even better, define each VLAN as a datapath port group */
1875 for (i = 0; i < br->n_ports; i++) {
1876 struct port *port = br->ports[i];
1877 if (port != in_port && port_includes_vlan(port, vlan)
1878 && !port->is_mirror_output_port
1879 && set_dst(dst, flow, in_port, port, tags)) {
1880 mirrors |= port->dst_mirrors;
1884 *nf_output_iface = NF_OUT_FLOOD;
1885 } else if (out_port && set_dst(dst, flow, in_port, out_port, tags)) {
1886 *nf_output_iface = dst->dp_ifidx;
1887 mirrors |= out_port->dst_mirrors;
1892 struct mirror *m = br->mirrors[mirror_mask_ffs(mirrors) - 1];
1893 if (!m->n_vlans || vlan_is_mirrored(m, vlan)) {
1895 if (set_dst(dst, flow, in_port, m->out_port, tags)
1896 && !dst_is_duplicate(dsts, dst - dsts, dst)) {
1900 for (i = 0; i < br->n_ports; i++) {
1901 struct port *port = br->ports[i];
1902 if (port_includes_vlan(port, m->out_vlan)
1903 && set_dst(dst, flow, in_port, port, tags))
1907 if (port->vlan < 0) {
1908 dst->vlan = m->out_vlan;
1910 if (dst_is_duplicate(dsts, dst - dsts, dst)) {
1914 /* Use the vlan tag on the original flow instead of
1915 * the one passed in the vlan parameter. This ensures
1916 * that we compare the vlan from before any implicit
1917 * tagging takes place. This is necessary because
1918 * dst->vlan is the final vlan, after removing implicit
1920 flow_vlan = ntohs(flow->dl_vlan);
1921 if (flow_vlan == 0) {
1922 flow_vlan = OFP_VLAN_NONE;
1924 if (port == in_port && dst->vlan == flow_vlan) {
1925 /* Don't send out input port on same VLAN. */
1933 mirrors &= mirrors - 1;
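/* A worked example of the mask walk: with mirrors == 0x0c (binary 1100),
 * mirror_mask_ffs() returns 3, so br->mirrors[2] is handled first;
 * "mirrors &= mirrors - 1" then clears that lowest set bit, leaving 0x08 for
 * the next pass, and the loop stops once the mask reaches zero. */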
1936 partition_dsts(dsts, dst - dsts, ntohs(flow->dl_vlan));
1941 print_dsts(const struct dst *dsts, size_t n)
1943 for (; n--; dsts++) {
1944 printf(">p%"PRIu16, dsts->dp_ifidx);
1945 if (dsts->vlan != OFP_VLAN_NONE) {
1946 printf("v%"PRIu16, dsts->vlan);
1952 compose_actions(struct bridge *br, const flow_t *flow, uint16_t vlan,
1953 const struct port *in_port, const struct port *out_port,
1954 tag_type *tags, struct odp_actions *actions,
1955 uint16_t *nf_output_iface)
1957 struct dst dsts[DP_MAX_PORTS * (MAX_MIRRORS + 1)];
1959 const struct dst *p;
1962 n_dsts = compose_dsts(br, flow, vlan, in_port, out_port, dsts, tags,
1965 cur_vlan = ntohs(flow->dl_vlan);
1966 for (p = dsts; p < &dsts[n_dsts]; p++) {
1967 union odp_action *a;
1968 if (p->vlan != cur_vlan) {
1969 if (p->vlan == OFP_VLAN_NONE) {
1970 odp_actions_add(actions, ODPAT_STRIP_VLAN);
1972 a = odp_actions_add(actions, ODPAT_SET_VLAN_VID);
1973 a->vlan_vid.vlan_vid = htons(p->vlan);
1977 a = odp_actions_add(actions, ODPAT_OUTPUT);
1978 a->output.port = p->dp_ifidx;
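/* Because partition_dsts() grouped the destinations by vlan, the loop above
 * only emits a vlan action when the vlan actually changes.  A small worked
 * example: for an untagged packet with dst vlans [OFP_VLAN_NONE, 10, 10] the
 * emitted actions are roughly
 *
 *     ODPAT_OUTPUT, ODPAT_SET_VLAN_VID 10, ODPAT_OUTPUT, ODPAT_OUTPUT
 *
 * i.e. only one vlan change is needed. */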
1982 /* Returns the effective vlan of a packet, taking into account both the
1983 * 802.1Q header and implicitly tagged ports. A value of 0 indicates that
1984 * the packet is untagged and -1 indicates it has an invalid header and
1985 * should be dropped. */
1986 static int flow_get_vlan(struct bridge *br, const flow_t *flow,
1987 struct port *in_port, bool have_packet)
1989 /* Note that dl_vlan of 0 and of OFP_VLAN_NONE both mean that the packet
1990 * belongs to VLAN 0, so we should treat both cases identically. (In the
1991 * former case, the packet has an 802.1Q header that specifies VLAN 0,
1992 * presumably to allow a priority to be specified. In the latter case, the
1993 * packet does not have any 802.1Q header.) */
1994 int vlan = ntohs(flow->dl_vlan);
1995 if (vlan == OFP_VLAN_NONE) {
1998 if (in_port->vlan >= 0) {
2000 /* XXX support double tagging? */
2002 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2003 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
2004 "packet received on port %s configured with "
2005 "implicit VLAN %"PRIu16,
2006 br->name, ntohs(flow->dl_vlan),
2007 in_port->name, in_port->vlan);
2011 vlan = in_port->vlan;
2013 if (!port_includes_vlan(in_port, vlan)) {
2015 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2016 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
2017 "packet received on port %s not configured for "
2019 br->name, vlan, in_port->name, vlan);
2029 update_learning_table(struct bridge *br, const flow_t *flow, int vlan,
2030 struct port *in_port)
2032 tag_type rev_tag = mac_learning_learn(br->ml, flow->dl_src,
2033 vlan, in_port->port_idx);
2035 /* The log messages here could actually be useful in debugging,
2036 * so keep the rate limit relatively high. */
2037 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30,
2039 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
2040 "on port %s in VLAN %d",
2041 br->name, ETH_ADDR_ARGS(flow->dl_src),
2042 in_port->name, vlan);
2043 ofproto_revalidate(br->ofproto, rev_tag);
2048 is_bcast_arp_reply(const flow_t *flow)
2050 return (flow->dl_type == htons(ETH_TYPE_ARP)
2051 && flow->nw_proto == ARP_OP_REPLY
2052 && eth_addr_is_broadcast(flow->dl_dst));
2055 /* If the composed actions may be applied to any packet in the given 'flow',
2056 * returns true. Otherwise, the actions should only be applied to 'packet', or
2057 * not at all, if 'packet' was NULL. */
2059 process_flow(struct bridge *br, const flow_t *flow,
2060 const struct ofpbuf *packet, struct odp_actions *actions,
2061 tag_type *tags, uint16_t *nf_output_iface)
2063 struct iface *in_iface;
2064 struct port *in_port;
2065 struct port *out_port = NULL; /* By default, drop the packet/flow. */
2069 /* Find the interface and port structure for the received packet. */
2070 in_iface = iface_from_dp_ifidx(br, flow->in_port);
2072 /* No interface? Something fishy... */
2073 if (packet != NULL) {
2074 /* Odd. A few possible reasons here:
2076 * - We deleted an interface but there are still a few packets
2077 * queued up from it.
2079 * - Someone externally added an interface (e.g. with "ovs-dpctl
2080 * add-if") that we don't know about.
2082 * - Packet arrived on the local port but the local port is not
2083 * one of our bridge ports.
2085 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2087 VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
2088 "interface %"PRIu16, br->name, flow->in_port);
2091 /* Return without adding any actions, to drop packets on this flow. */
2094 in_port = in_iface->port;
2095 vlan = flow_get_vlan(br, flow, in_port, !!packet);
2100 /* Drop frames for reserved multicast addresses. */
2101 if (eth_addr_is_reserved(flow->dl_dst)) {
2105 /* Drop frames on ports reserved for mirroring. */
2106 if (in_port->is_mirror_output_port) {
2107 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2108 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port %s, "
2109 "which is reserved exclusively for mirroring",
2110 br->name, in_port->name);
2114 /* Packets received on bonds need special attention to avoid duplicates. */
2115 if (in_port->n_ifaces > 1) {
2118 if (eth_addr_is_multicast(flow->dl_dst)) {
2119 *tags |= in_port->active_iface_tag;
2120 if (in_port->active_iface != in_iface->port_ifidx) {
2121 /* Drop all multicast packets on inactive slaves. */
2126 /* Drop all packets for which we have learned a different input
2127 * port, because we probably sent the packet on one slave and got
2128 * it back on the other. Broadcast ARP replies are an exception
2129 * to this rule: the host has moved to another switch. */
2130 src_idx = mac_learning_lookup(br->ml, flow->dl_src, vlan);
2131 if (src_idx != -1 && src_idx != in_port->port_idx &&
2132 !is_bcast_arp_reply(flow)) {
2138 out_port = FLOOD_PORT;
2139 /* Learn source MAC (but don't try to learn from revalidation). */
2141 update_learning_table(br, flow, vlan, in_port);
2144 /* Determine output port. */
2145 out_port_idx = mac_learning_lookup_tag(br->ml, flow->dl_dst, vlan,
2147 if (out_port_idx >= 0 && out_port_idx < br->n_ports) {
2148 out_port = br->ports[out_port_idx];
2149 } else if (!packet && !eth_addr_is_multicast(flow->dl_dst)) {
2150 /* If we are revalidating but don't have a learning entry then
2151 * eject the flow. Installing a flow that floods packets opens
2152 * up a window of time where we could learn from a packet reflected
2153 * on a bond and blackhole packets before the learning table is
2154 * updated to reflect the correct port. */
2158 /* Don't send packets out their input ports. */
2159 if (in_port == out_port) {
2164 compose_actions(br, flow, vlan, in_port, out_port, tags, actions,
2170 /* Careful: 'opp' is in host byte order and opp->port_no is an OFP port
2173 bridge_port_changed_ofhook_cb(enum ofp_port_reason reason,
2174 const struct ofp_phy_port *opp,
2177 struct bridge *br = br_;
2178 struct iface *iface;
2181 iface = iface_from_dp_ifidx(br, ofp_port_to_odp_port(opp->port_no));
2187 if (reason == OFPPR_DELETE) {
2188 VLOG_WARN("bridge %s: interface %s deleted unexpectedly",
2189 br->name, iface->name);
2190 iface_destroy(iface);
2191 if (!port->n_ifaces) {
2192 VLOG_WARN("bridge %s: port %s has no interfaces, dropping",
2193 br->name, port->name);
2199 if (port->n_ifaces > 1) {
2200 bool up = !(opp->state & OFPPS_LINK_DOWN);
2201 bond_link_status_update(iface, up);
2202 port_update_bond_compat(port);
2208 bridge_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
2209 struct odp_actions *actions, tag_type *tags,
2210 uint16_t *nf_output_iface, void *br_)
2212 struct bridge *br = br_;
2214 COVERAGE_INC(bridge_process_flow);
2215 return process_flow(br, flow, packet, actions, tags, nf_output_iface);
2219 bridge_account_flow_ofhook_cb(const flow_t *flow,
2220 const union odp_action *actions,
2221 size_t n_actions, unsigned long long int n_bytes,
2224 struct bridge *br = br_;
2225 struct port *in_port;
2226 const union odp_action *a;
2228 /* Feed information from the active flows back into the learning table
2229 * to ensure that table is always in sync with what is actually flowing
2230 * through the datapath. */
2231 in_port = port_from_dp_ifidx(br, flow->in_port);
2233 int vlan = flow_get_vlan(br, flow, in_port, false);
2235 update_learning_table(br, flow, vlan, in_port);
2239 if (!br->has_bonded_ports) {
2243 for (a = actions; a < &actions[n_actions]; a++) {
2244 if (a->type == ODPAT_OUTPUT) {
2245 struct port *out_port = port_from_dp_ifidx(br, a->output.port);
2246 if (out_port && out_port->n_ifaces >= 2) {
2247 struct bond_entry *e = lookup_bond_entry(out_port,
2249 e->tx_bytes += n_bytes;
2256 bridge_account_checkpoint_ofhook_cb(void *br_)
2258 struct bridge *br = br_;
2261 if (!br->has_bonded_ports) {
2265 /* The current ofproto implementation calls this callback at least once a
2266 * second, so this timer implementation is sufficient. */
2267 if (time_msec() < br->bond_next_rebalance) {
2270 br->bond_next_rebalance = time_msec() + 10000;
2272 for (i = 0; i < br->n_ports; i++) {
2273 struct port *port = br->ports[i];
2274 if (port->n_ifaces > 1) {
2275 bond_rebalance_port(port);
2280 static struct ofhooks bridge_ofhooks = {
2281 bridge_port_changed_ofhook_cb,
2282 bridge_normal_ofhook_cb,
2283 bridge_account_flow_ofhook_cb,
2284 bridge_account_checkpoint_ofhook_cb,
2287 /* Bonding functions. */
2289 /* Statistics for a single interface on a bonded port, used for load-based
2290 * bond rebalancing. */
2291 struct slave_balance {
2292 struct iface *iface; /* The interface. */
2293 uint64_t tx_bytes; /* Sum of hashes[*]->tx_bytes. */
2295 /* All the "bond_entry"s that are assigned to this interface, in order of
2296 * increasing tx_bytes. */
2297 struct bond_entry **hashes;
2301 /* Sorts pointers to pointers to bond_entries in ascending order by the
2302 * interface to which they are assigned, and within a single interface in
2303 * ascending order of bytes transmitted. */
2305 compare_bond_entries(const void *a_, const void *b_)
2307 const struct bond_entry *const *ap = a_;
2308 const struct bond_entry *const *bp = b_;
2309 const struct bond_entry *a = *ap;
2310 const struct bond_entry *b = *bp;
2311 if (a->iface_idx != b->iface_idx) {
2312 return a->iface_idx > b->iface_idx ? 1 : -1;
2313 } else if (a->tx_bytes != b->tx_bytes) {
2314 return a->tx_bytes > b->tx_bytes ? 1 : -1;
2320 /* Sorts slave_balances so that enabled ports come first, and otherwise in
2321 * *descending* order by number of bytes transmitted. */
2323 compare_slave_balance(const void *a_, const void *b_)
2325 const struct slave_balance *a = a_;
2326 const struct slave_balance *b = b_;
2327 if (a->iface->enabled != b->iface->enabled) {
2328 return a->iface->enabled ? -1 : 1;
2329 } else if (a->tx_bytes != b->tx_bytes) {
2330 return a->tx_bytes > b->tx_bytes ? -1 : 1;
2337 swap_bals(struct slave_balance *a, struct slave_balance *b)
2339 struct slave_balance tmp = *a;
2344 /* Restores the 'n_bals' slave_balance structures in 'bals' to sorted order
2345 * given that 'p' (and only 'p') might be in the wrong location.
2347 * This function invalidates 'p', since it might now be in a different memory
2350 resort_bals(struct slave_balance *p,
2351 struct slave_balance bals[], size_t n_bals)
2354 for (; p > bals && p->tx_bytes > p[-1].tx_bytes; p--) {
2355 swap_bals(p, p - 1);
2357 for (; p < &bals[n_bals - 1] && p->tx_bytes < p[1].tx_bytes; p++) {
2358 swap_bals(p, p + 1);
2364 log_bals(const struct slave_balance *bals, size_t n_bals, struct port *port)
2366 if (VLOG_IS_DBG_ENABLED()) {
2367 struct ds ds = DS_EMPTY_INITIALIZER;
2368 const struct slave_balance *b;
2370 for (b = bals; b < bals + n_bals; b++) {
2374 ds_put_char(&ds, ',');
2376 ds_put_format(&ds, " %s %"PRIu64"kB",
2377 b->iface->name, b->tx_bytes / 1024);
2379 if (!b->iface->enabled) {
2380 ds_put_cstr(&ds, " (disabled)");
2382 if (b->n_hashes > 0) {
2383 ds_put_cstr(&ds, " (");
2384 for (i = 0; i < b->n_hashes; i++) {
2385 const struct bond_entry *e = b->hashes[i];
2387 ds_put_cstr(&ds, " + ");
2389 ds_put_format(&ds, "h%td: %"PRIu64"kB",
2390 e - port->bond_hash, e->tx_bytes / 1024);
2392 ds_put_cstr(&ds, ")");
2395 VLOG_DBG("bond %s:%s", port->name, ds_cstr(&ds));
2400 /* Shifts 'hash' from 'from' to 'to' within 'port'. */
2402 bond_shift_load(struct slave_balance *from, struct slave_balance *to,
2405 struct bond_entry *hash = from->hashes[hash_idx];
2406 struct port *port = from->iface->port;
2407 uint64_t delta = hash->tx_bytes;
2409 VLOG_INFO("bond %s: shift %"PRIu64"kB of load (with hash %td) "
2410 "from %s to %s (now carrying %"PRIu64"kB and "
2411 "%"PRIu64"kB load, respectively)",
2412 port->name, delta / 1024, hash - port->bond_hash,
2413 from->iface->name, to->iface->name,
2414 (from->tx_bytes - delta) / 1024,
2415 (to->tx_bytes + delta) / 1024);
2417 /* Delete element from from->hashes.
2419      * We don't bother to add the element to to->hashes because not only would
2420      * it require more work, but its only purpose would be to allow that hash
2421      * to be migrated to another slave in this rebalancing run, and there is no
2422      * point in doing that. */
2423 if (hash_idx == 0) {
2426 memmove(from->hashes + hash_idx, from->hashes + hash_idx + 1,
2427 (from->n_hashes - (hash_idx + 1)) * sizeof *from->hashes);
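    /* (Illustrative: with n_hashes == 4 and hash_idx == 1, the memmove slides
     * entries 2 and 3 down into slots 1 and 2, so the removed hash no longer
     * appears in from->hashes.) */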
2431 /* Shift load away from 'from' to 'to'. */
2432 from->tx_bytes -= delta;
2433 to->tx_bytes += delta;
2435 /* Arrange for flows to be revalidated. */
2436 ofproto_revalidate(port->bridge->ofproto, hash->iface_tag);
2437 hash->iface_idx = to->iface->port_ifidx;
2438 hash->iface_tag = tag_create_random();
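    /* Revalidating on the old 'iface_tag' above kills the flows installed
     * while this hash pointed at 'from'; the fresh random tag lets flows
     * installed from now on be revalidated independently the next time the
     * hash moves. */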
2442 bond_rebalance_port(struct port *port)
2444 struct slave_balance bals[DP_MAX_PORTS];
2446 struct bond_entry *hashes[BOND_MASK + 1];
2447 struct slave_balance *b, *from, *to;
2448 struct bond_entry *e;
2451 /* Sets up 'bals' to describe each of the port's interfaces, sorted in
2452 * descending order of tx_bytes, so that bals[0] represents the most
2453 * heavily loaded slave and bals[n_bals - 1] represents the least heavily
2456 * The code is a bit tricky: to avoid dynamically allocating a 'hashes'
2457 * array for each slave_balance structure, we sort our local array of
2458 * hashes in order by slave, so that all of the hashes for a given slave
2459      * become contiguous in memory, and then we point each 'hashes' member of
2460 * a slave_balance structure to the start of a contiguous group. */
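    /* (Illustrative: if hashes[] sorts to
     *      [e0, e1 (iface 0), e2, e3, e4 (iface 1), e5 (iface 2)]
     * then bals[0].hashes points at &hashes[0], bals[1].hashes at &hashes[2],
     * and bals[2].hashes at &hashes[5], with no per-slave allocation
     * needed.) */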
2461 n_bals = port->n_ifaces;
2462 for (b = bals; b < &bals[n_bals]; b++) {
2463 b->iface = port->ifaces[b - bals];
2468 for (i = 0; i <= BOND_MASK; i++) {
2469 hashes[i] = &port->bond_hash[i];
2471 qsort(hashes, BOND_MASK + 1, sizeof *hashes, compare_bond_entries);
2472 for (i = 0; i <= BOND_MASK; i++) {
2474 if (e->iface_idx >= 0 && e->iface_idx < port->n_ifaces) {
2475 b = &bals[e->iface_idx];
2476 b->tx_bytes += e->tx_bytes;
2478 b->hashes = &hashes[i];
2483 qsort(bals, n_bals, sizeof *bals, compare_slave_balance);
2484 log_bals(bals, n_bals, port);
2486 /* Discard slaves that aren't enabled (which were sorted to the back of the
2487 * array earlier). */
2488 while (!bals[n_bals - 1].iface->enabled) {
2495 /* Shift load from the most-loaded slaves to the least-loaded slaves. */
2496 to = &bals[n_bals - 1];
2497 for (from = bals; from < to; ) {
2498 uint64_t overload = from->tx_bytes - to->tx_bytes;
2499 if (overload < to->tx_bytes >> 5 || overload < 100000) {
2500 /* The extra load on 'from' (and all less-loaded slaves), compared
2501 * to that of 'to' (the least-loaded slave), is less than ~3%, or
2502 * it is less than ~1Mbps. No point in rebalancing. */
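            /* (Illustrative numbers: if 'to' carries 3,200,000 bytes then
             * to->tx_bytes >> 5 is 100,000, so 'from' must lead 'to' by at
             * least 100 kB, and by at least 100,000 bytes absolutely, before
             * we shift anything.) */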
2504 } else if (from->n_hashes == 1) {
2505 /* 'from' only carries a single MAC hash, so we can't shift any
2506 * load away from it, even though we want to. */
2509 /* 'from' is carrying significantly more load than 'to', and that
2510 * load is split across at least two different hashes. Pick a hash
2511 * to migrate to 'to' (the least-loaded slave), given that doing so
2512 * must decrease the ratio of the load on the two slaves by at
2515 * The sort order we use means that we prefer to shift away the
2516 * smallest hashes instead of the biggest ones. There is little
2517 * reason behind this decision; we could use the opposite sort
2518 * order to shift away big hashes ahead of small ones. */
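            /* Worked example (illustrative): from->tx_bytes == 6,000,000,
             * to->tx_bytes == 2,000,000, candidate hash delta == 1,000,000:
             * old_ratio == 3.0, new_ratio == 5,000,000 / 3,000,000 ~= 1.67,
             * an improvement of ~1.33 > 0.1, so that hash would be chosen. */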
2522 for (i = 0; i < from->n_hashes; i++) {
2523 double old_ratio, new_ratio;
2524 uint64_t delta = from->hashes[i]->tx_bytes;
2526 if (delta == 0 || from->tx_bytes - delta == 0) {
2527 /* Pointless move. */
2531 order_swapped = from->tx_bytes - delta < to->tx_bytes + delta;
2533 if (to->tx_bytes == 0) {
2534 /* Nothing on the new slave, move it. */
2538 old_ratio = (double)from->tx_bytes / to->tx_bytes;
2539 new_ratio = (double)(from->tx_bytes - delta) /
2540 (to->tx_bytes + delta);
2542 if (new_ratio == 0) {
2543 /* Should already be covered but check to prevent division
2548 if (new_ratio < 1) {
2549 new_ratio = 1 / new_ratio;
2552 if (old_ratio - new_ratio > 0.1) {
2553 /* Would decrease the ratio, move it. */
2557 if (i < from->n_hashes) {
2558 bond_shift_load(from, to, i);
2559 port->bond_compat_is_stale = true;
2561 /* If the result of the migration changed the relative order of
2562                  * 'from' and 'to', swap them back to maintain invariants. */
2563 if (order_swapped) {
2564 swap_bals(from, to);
2567 /* Re-sort 'bals'. Note that this may make 'from' and 'to'
2568 * point to different slave_balance structures. It is only
2569 * valid to do these two operations in a row at all because we
2570 * know that 'from' will not move past 'to' and vice versa. */
2571 resort_bals(from, bals, n_bals);
2572 resort_bals(to, bals, n_bals);
2579 /* Implement exponentially weighted moving average. A weight of 1/2 causes
2580 * historical data to decay to <1% in 7 rebalancing runs. */
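    /* ((1/2)^7 ~= 0.008, i.e. under 1%; at the 10-second rebalancing interval
     * used in bridge_account_checkpoint_ofhook_cb() that is roughly 70
     * seconds of history.) */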
2581 for (e = &port->bond_hash[0]; e <= &port->bond_hash[BOND_MASK]; e++) {
2587 bond_send_learning_packets(struct port *port)
2589 struct bridge *br = port->bridge;
2590 struct mac_entry *e;
2591 struct ofpbuf packet;
2592 int error, n_packets, n_errors;
2594 if (!port->n_ifaces || port->active_iface < 0) {
2598 ofpbuf_init(&packet, 128);
2599 error = n_packets = n_errors = 0;
2600 LIST_FOR_EACH (e, struct mac_entry, lru_node, &br->ml->lrus) {
2601 union ofp_action actions[2], *a;
2607 if (e->port == port->port_idx
2608 || !choose_output_iface(port, e->mac, &dp_ifidx, &tags)) {
2612 /* Compose actions. */
2613 memset(actions, 0, sizeof actions);
2616 a->vlan_vid.type = htons(OFPAT_SET_VLAN_VID);
2617 a->vlan_vid.len = htons(sizeof *a);
2618 a->vlan_vid.vlan_vid = htons(e->vlan);
2621 a->output.type = htons(OFPAT_OUTPUT);
2622 a->output.len = htons(sizeof *a);
2623 a->output.port = htons(odp_port_to_ofp_port(dp_ifidx));
2628 compose_benign_packet(&packet, "Open vSwitch Bond Failover", 0xf177,
2630 flow_extract(&packet, ODPP_NONE, &flow);
2631 retval = ofproto_send_packet(br->ofproto, &flow, actions, a - actions,
2638 ofpbuf_uninit(&packet);
2641 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2642 VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
2643 "packets, last error was: %s",
2644 port->name, n_errors, n_packets, strerror(error));
2646 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
2647 port->name, n_packets);
2651 /* Bonding unixctl user interface functions. */
2654 bond_unixctl_list(struct unixctl_conn *conn,
2655 const char *args UNUSED, void *aux UNUSED)
2657 struct ds ds = DS_EMPTY_INITIALIZER;
2658 const struct bridge *br;
2660 ds_put_cstr(&ds, "bridge\tbond\tslaves\n");
2662 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
2665 for (i = 0; i < br->n_ports; i++) {
2666 const struct port *port = br->ports[i];
2667 if (port->n_ifaces > 1) {
2670 ds_put_format(&ds, "%s\t%s\t", br->name, port->name);
2671 for (j = 0; j < port->n_ifaces; j++) {
2672 const struct iface *iface = port->ifaces[j];
2674 ds_put_cstr(&ds, ", ");
2676 ds_put_cstr(&ds, iface->name);
2678 ds_put_char(&ds, '\n');
2682 unixctl_command_reply(conn, 200, ds_cstr(&ds));
2686 static struct port *
2687 bond_find(const char *name)
2689 const struct bridge *br;
2691 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
2694 for (i = 0; i < br->n_ports; i++) {
2695 struct port *port = br->ports[i];
2696 if (!strcmp(port->name, name) && port->n_ifaces > 1) {
2705 bond_unixctl_show(struct unixctl_conn *conn,
2706 const char *args, void *aux UNUSED)
2708 struct ds ds = DS_EMPTY_INITIALIZER;
2709 const struct port *port;
2712 port = bond_find(args);
2714 unixctl_command_reply(conn, 501, "no such bond");
2718 ds_put_format(&ds, "updelay: %d ms\n", port->updelay);
2719 ds_put_format(&ds, "downdelay: %d ms\n", port->downdelay);
2720 ds_put_format(&ds, "next rebalance: %lld ms\n",
2721 port->bridge->bond_next_rebalance - time_msec());
2722 for (j = 0; j < port->n_ifaces; j++) {
2723 const struct iface *iface = port->ifaces[j];
2724 struct bond_entry *be;
2727 ds_put_format(&ds, "slave %s: %s\n",
2728 iface->name, iface->enabled ? "enabled" : "disabled");
2729 if (j == port->active_iface) {
2730 ds_put_cstr(&ds, "\tactive slave\n");
2732 if (iface->delay_expires != LLONG_MAX) {
2733 ds_put_format(&ds, "\t%s expires in %lld ms\n",
2734 iface->enabled ? "downdelay" : "updelay",
2735 iface->delay_expires - time_msec());
2739 for (be = port->bond_hash; be <= &port->bond_hash[BOND_MASK]; be++) {
2740 int hash = be - port->bond_hash;
2741 struct mac_entry *me;
2743 if (be->iface_idx != j) {
2747 ds_put_format(&ds, "\thash %d: %"PRIu64" kB load\n",
2748 hash, be->tx_bytes / 1024);
2751 LIST_FOR_EACH (me, struct mac_entry, lru_node,
2752 &port->bridge->ml->lrus) {
2755 if (bond_hash(me->mac) == hash
2756 && me->port != port->port_idx
2757 && choose_output_iface(port, me->mac, &dp_ifidx, &tags)
2758 && dp_ifidx == iface->dp_ifidx)
2760 ds_put_format(&ds, "\t\t"ETH_ADDR_FMT"\n",
2761 ETH_ADDR_ARGS(me->mac));
2766 unixctl_command_reply(conn, 200, ds_cstr(&ds));
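    /* The reply composed above looks like, e.g. (illustrative values only):
     *
     *     updelay: 0 ms
     *     downdelay: 0 ms
     *     next rebalance: 7421 ms
     *     slave eth2: enabled
     *         active slave
     *         hash 54: 613 kB load
     *             00:16:3e:12:34:56
     *     slave eth3: enabled
     */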
2771 bond_unixctl_migrate(struct unixctl_conn *conn, const char *args_,
2774 char *args = (char *) args_;
2775 char *save_ptr = NULL;
2776 char *bond_s, *hash_s, *slave_s;
2777 uint8_t mac[ETH_ADDR_LEN];
2779 struct iface *iface;
2780 struct bond_entry *entry;
2783 bond_s = strtok_r(args, " ", &save_ptr);
2784 hash_s = strtok_r(NULL, " ", &save_ptr);
2785 slave_s = strtok_r(NULL, " ", &save_ptr);
2787 unixctl_command_reply(conn, 501,
2788 "usage: bond/migrate BOND HASH SLAVE");
2792 port = bond_find(bond_s);
2794 unixctl_command_reply(conn, 501, "no such bond");
2798 if (sscanf(hash_s, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))
2799 == ETH_ADDR_SCAN_COUNT) {
2800 hash = bond_hash(mac);
2801 } else if (strspn(hash_s, "0123456789") == strlen(hash_s)) {
2802 hash = atoi(hash_s) & BOND_MASK;
2804 unixctl_command_reply(conn, 501, "bad hash");
2808 iface = port_lookup_iface(port, slave_s);
2810 unixctl_command_reply(conn, 501, "no such slave");
2814 if (!iface->enabled) {
2815 unixctl_command_reply(conn, 501, "cannot migrate to disabled slave");
2819 entry = &port->bond_hash[hash];
2820 ofproto_revalidate(port->bridge->ofproto, entry->iface_tag);
2821 entry->iface_idx = iface->port_ifidx;
2822 entry->iface_tag = tag_create_random();
2823 port->bond_compat_is_stale = true;
2824 unixctl_command_reply(conn, 200, "migrated");
2828 bond_unixctl_set_active_slave(struct unixctl_conn *conn, const char *args_,
2831 char *args = (char *) args_;
2832 char *save_ptr = NULL;
2833 char *bond_s, *slave_s;
2835 struct iface *iface;
2837 bond_s = strtok_r(args, " ", &save_ptr);
2838 slave_s = strtok_r(NULL, " ", &save_ptr);
2840 unixctl_command_reply(conn, 501,
2841 "usage: bond/set-active-slave BOND SLAVE");
2845 port = bond_find(bond_s);
2847 unixctl_command_reply(conn, 501, "no such bond");
2851 iface = port_lookup_iface(port, slave_s);
2853 unixctl_command_reply(conn, 501, "no such slave");
2857 if (!iface->enabled) {
2858 unixctl_command_reply(conn, 501, "cannot make disabled slave active");
2862 if (port->active_iface != iface->port_ifidx) {
2863 ofproto_revalidate(port->bridge->ofproto, port->active_iface_tag);
2864 port->active_iface = iface->port_ifidx;
2865 port->active_iface_tag = tag_create_random();
2866 VLOG_INFO("port %s: active interface is now %s",
2867 port->name, iface->name);
2868 bond_send_learning_packets(port);
2869 unixctl_command_reply(conn, 200, "done");
2871 unixctl_command_reply(conn, 200, "no change");
2876 enable_slave(struct unixctl_conn *conn, const char *args_, bool enable)
2878 char *args = (char *) args_;
2879 char *save_ptr = NULL;
2880 char *bond_s, *slave_s;
2882 struct iface *iface;
2884 bond_s = strtok_r(args, " ", &save_ptr);
2885 slave_s = strtok_r(NULL, " ", &save_ptr);
2887 unixctl_command_reply(conn, 501,
2888 "usage: bond/enable/disable-slave BOND SLAVE");
2892 port = bond_find(bond_s);
2894 unixctl_command_reply(conn, 501, "no such bond");
2898 iface = port_lookup_iface(port, slave_s);
2900 unixctl_command_reply(conn, 501, "no such slave");
2904 bond_enable_slave(iface, enable);
2905     unixctl_command_reply(conn, 200, enable ? "enabled" : "disabled");
2909 bond_unixctl_enable_slave(struct unixctl_conn *conn, const char *args,
2912 enable_slave(conn, args, true);
2916 bond_unixctl_disable_slave(struct unixctl_conn *conn, const char *args,
2919 enable_slave(conn, args, false);
2923 bond_unixctl_hash(struct unixctl_conn *conn, const char *args,
2926 uint8_t mac[ETH_ADDR_LEN];
2930 if (sscanf(args, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))
2931 == ETH_ADDR_SCAN_COUNT) {
2932 hash = bond_hash(mac);
2934 hash_cstr = xasprintf("%u", hash);
2935 unixctl_command_reply(conn, 200, hash_cstr);
2938 unixctl_command_reply(conn, 501, "invalid mac");
2945 unixctl_command_register("bond/list", bond_unixctl_list, NULL);
2946 unixctl_command_register("bond/show", bond_unixctl_show, NULL);
2947 unixctl_command_register("bond/migrate", bond_unixctl_migrate, NULL);
2948 unixctl_command_register("bond/set-active-slave",
2949 bond_unixctl_set_active_slave, NULL);
2950 unixctl_command_register("bond/enable-slave", bond_unixctl_enable_slave,
2952 unixctl_command_register("bond/disable-slave", bond_unixctl_disable_slave,
2954 unixctl_command_register("bond/hash", bond_unixctl_hash, NULL);
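    /* These commands are reachable through the vswitchd control socket, e.g.
     * (illustrative invocations):
     *
     *     ovs-appctl bond/list
     *     ovs-appctl bond/show BOND
     *     ovs-appctl bond/migrate BOND HASH SLAVE
     *     ovs-appctl bond/set-active-slave BOND SLAVE
     *     ovs-appctl bond/enable-slave BOND SLAVE
     *     ovs-appctl bond/disable-slave BOND SLAVE
     *     ovs-appctl bond/hash MAC
     */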
2957 /* Port functions. */
2959 static struct port *
2960 port_create(struct bridge *br, const char *name)
2964 port = xzalloc(sizeof *port);
2966 port->port_idx = br->n_ports;
2968 port->trunks = NULL;
2969 port->name = xstrdup(name);
2970 port->active_iface = -1;
2972 if (br->n_ports >= br->allocated_ports) {
2973 br->ports = x2nrealloc(br->ports, &br->allocated_ports,
2976 br->ports[br->n_ports++] = port;
2978 VLOG_INFO("created port %s on bridge %s", port->name, br->name);
2985 port_reconfigure(struct port *port, const struct ovsrec_port *cfg)
2987 struct shash old_ifaces, new_ifaces;
2988 struct shash_node *node;
2989 unsigned long *trunks;
2995 /* Collect old and new interfaces. */
2996 shash_init(&old_ifaces);
2997 shash_init(&new_ifaces);
2998 for (i = 0; i < port->n_ifaces; i++) {
2999 shash_add(&old_ifaces, port->ifaces[i]->name, port->ifaces[i]);
3001 for (i = 0; i < cfg->n_interfaces; i++) {
3002 const char *name = cfg->interfaces[i]->name;
3003 if (!shash_add_once(&new_ifaces, name, cfg->interfaces[i])) {
3004 VLOG_WARN("port %s: %s specified twice as port interface",
3008 port->updelay = cfg->bond_updelay;
3009 if (port->updelay < 0) {
3012     port->downdelay = cfg->bond_downdelay;
3013 if (port->downdelay < 0) {
3014 port->downdelay = 0;
3017 /* Get rid of deleted interfaces and add new interfaces. */
3018 SHASH_FOR_EACH (node, &old_ifaces) {
3019 if (!shash_find(&new_ifaces, node->name)) {
3020 iface_destroy(node->data);
3023 SHASH_FOR_EACH (node, &new_ifaces) {
3024 const struct ovsrec_interface *if_cfg = node->data;
3025 struct iface *iface;
3027 iface = shash_find_data(&old_ifaces, if_cfg->name);
3029 iface = iface_create(port, if_cfg);
3031 iface->cfg = if_cfg;
3037 if (port->n_ifaces < 2) {
3039 if (vlan >= 0 && vlan <= 4095) {
3040 VLOG_DBG("port %s: assigning VLAN tag %d", port->name, vlan);
3045 /* It's possible that bonded, VLAN-tagged ports make sense. Maybe
3046 * they even work as-is. But they have not been tested. */
3047 VLOG_WARN("port %s: VLAN tags not supported on bonded ports",
3051 if (port->vlan != vlan) {
3053 bridge_flush(port->bridge);
3056 /* Get trunked VLANs. */
3062 trunks = bitmap_allocate(4096);
3064 for (i = 0; i < cfg->n_trunks; i++) {
3065 int trunk = cfg->trunks[i];
3067 bitmap_set1(trunks, trunk);
3073 VLOG_ERR("port %s: invalid values for %zu trunk VLANs",
3074 port->name, cfg->n_trunks);
3076 if (n_errors == cfg->n_trunks) {
3078 VLOG_ERR("port %s: no valid trunks, trunking all VLANs",
3081 bitmap_set_multiple(trunks, 0, 4096, 1);
3084 if (cfg->n_trunks) {
3085 VLOG_ERR("port %s: ignoring trunks in favor of implicit vlan",
3090 ? port->trunks != NULL
3091 : port->trunks == NULL || !bitmap_equal(trunks, port->trunks, 4096)) {
3092 bridge_flush(port->bridge);
3094 bitmap_free(port->trunks);
3095 port->trunks = trunks;
3097 shash_destroy(&old_ifaces);
3098 shash_destroy(&new_ifaces);
3102 port_destroy(struct port *port)
3105 struct bridge *br = port->bridge;
3108 proc_net_compat_update_vlan(port->name, NULL, 0);
3109 proc_net_compat_update_bond(port->name, NULL);
3112 for (i = 0; i < MAX_MIRRORS; i++) {
3113 struct mirror *m = br->mirrors[i];
3114 if (m && m->out_port == port) {
3120 while (port->n_ifaces > 0) {
3121 iface_destroy(port->ifaces[port->n_ifaces - 1]);
3124 del = br->ports[port->port_idx] = br->ports[--br->n_ports];
3125 del->port_idx = port->port_idx;
3128 bitmap_free(port->trunks);
3135 static struct port *
3136 port_from_dp_ifidx(const struct bridge *br, uint16_t dp_ifidx)
3138 struct iface *iface = iface_from_dp_ifidx(br, dp_ifidx);
3139 return iface ? iface->port : NULL;
3142 static struct port *
3143 port_lookup(const struct bridge *br, const char *name)
3147 for (i = 0; i < br->n_ports; i++) {
3148 struct port *port = br->ports[i];
3149 if (!strcmp(port->name, name)) {
3156 static struct iface *
3157 port_lookup_iface(const struct port *port, const char *name)
3161 for (j = 0; j < port->n_ifaces; j++) {
3162 struct iface *iface = port->ifaces[j];
3163 if (!strcmp(iface->name, name)) {
3171 port_update_bonding(struct port *port)
3173 if (port->n_ifaces < 2) {
3174 /* Not a bonded port. */
3175 if (port->bond_hash) {
3176 free(port->bond_hash);
3177 port->bond_hash = NULL;
3178 port->bond_compat_is_stale = true;
3179 port->bond_fake_iface = false;
3182 if (!port->bond_hash) {
3185 port->bond_hash = xcalloc(BOND_MASK + 1, sizeof *port->bond_hash);
3186 for (i = 0; i <= BOND_MASK; i++) {
3187 struct bond_entry *e = &port->bond_hash[i];
3191 port->no_ifaces_tag = tag_create_random();
3192 bond_choose_active_iface(port);
3194 port->bond_compat_is_stale = true;
3195 port->bond_fake_iface = port->cfg->bond_fake_iface;
3200 port_update_bond_compat(struct port *port)
3202 struct compat_bond_hash compat_hashes[BOND_MASK + 1];
3203 struct compat_bond bond;
3206 if (port->n_ifaces < 2) {
3207 proc_net_compat_update_bond(port->name, NULL);
3212 bond.updelay = port->updelay;
3213 bond.downdelay = port->downdelay;
3216 bond.hashes = compat_hashes;
3217 if (port->bond_hash) {
3218 const struct bond_entry *e;
3219 for (e = port->bond_hash; e <= &port->bond_hash[BOND_MASK]; e++) {
3220 if (e->iface_idx >= 0 && e->iface_idx < port->n_ifaces) {
3221 struct compat_bond_hash *cbh = &bond.hashes[bond.n_hashes++];
3222 cbh->hash = e - port->bond_hash;
3223 cbh->netdev_name = port->ifaces[e->iface_idx]->name;
3228 bond.n_slaves = port->n_ifaces;
3229 bond.slaves = xmalloc(port->n_ifaces * sizeof *bond.slaves);
3230 for (i = 0; i < port->n_ifaces; i++) {
3231 struct iface *iface = port->ifaces[i];
3232 struct compat_bond_slave *slave = &bond.slaves[i];
3233 slave->name = iface->name;
3235 /* We need to make the same determination as the Linux bonding
3236          * code to determine whether a slave should be considered "up".
3237 * The Linux function bond_miimon_inspect() supports four
3238 * BOND_LINK_* states:
3240 * - BOND_LINK_UP: carrier detected, updelay has passed.
3241 * - BOND_LINK_FAIL: carrier lost, downdelay in progress.
3242 * - BOND_LINK_DOWN: carrier lost, downdelay has passed.
3243 * - BOND_LINK_BACK: carrier detected, updelay in progress.
3245 * The function bond_info_show_slave() only considers BOND_LINK_UP
3246 * to be "up" and anything else to be "down".
3248 slave->up = iface->enabled && iface->delay_expires == LLONG_MAX;
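        /* (So a slave still carrying traffic but inside its downdelay window
         * (BOND_LINK_FAIL) is reported "down", as is one waiting out its
         * updelay (BOND_LINK_BACK), matching the Linux behavior described
         * above.) */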
3252 netdev_get_etheraddr(iface->netdev, slave->mac);
3255 if (port->bond_fake_iface) {
3256 struct netdev *bond_netdev;
3258 if (!netdev_open(port->name, NETDEV_ETH_TYPE_NONE, &bond_netdev)) {
3260 netdev_turn_flags_on(bond_netdev, NETDEV_UP, true);
3262 netdev_turn_flags_off(bond_netdev, NETDEV_UP, true);
3264 netdev_close(bond_netdev);
3268 proc_net_compat_update_bond(port->name, &bond);
3273 port_update_vlan_compat(struct port *port)
3275 struct bridge *br = port->bridge;
3276 char *vlandev_name = NULL;
3278 if (port->vlan > 0) {
3279 /* Figure out the name that the VLAN device should actually have, if it
3280 * existed. This takes some work because the VLAN device would not
3281 * have port->name in its name; rather, it would have the trunk port's
3282 * name, and 'port' would be attached to a bridge that also had the
3283          * VLAN device as one of its ports.  So we need to find a trunk port that
3284 * includes port->vlan.
3286 * There might be more than one candidate. This doesn't happen on
3287 * XenServer, so if it happens we just pick the first choice in
3288 * alphabetical order instead of creating multiple VLAN devices. */
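        /* Example (hypothetical names): an access port "xapi1" with vlan 9 on
         * a bridge that also has a trunk port "eth0" carrying VLAN 9 would
         * pick "eth0" as 'vlandev_name', so the compat layer can present the
         * VLAN device under the trunk port's name (something like eth0.9)
         * rather than under "xapi1". */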
3290 for (i = 0; i < br->n_ports; i++) {
3291 struct port *p = br->ports[i];
3292 if (port_trunks_vlan(p, port->vlan)
3294 && (!vlandev_name || strcmp(p->name, vlandev_name) <= 0))
3296 uint8_t ea[ETH_ADDR_LEN];
3297 netdev_get_etheraddr(p->ifaces[0]->netdev, ea);
3298 if (!eth_addr_is_multicast(ea) &&
3299 !eth_addr_is_reserved(ea) &&
3300 !eth_addr_is_zero(ea)) {
3301 vlandev_name = p->name;
3306 proc_net_compat_update_vlan(port->name, vlandev_name, port->vlan);
3309 /* Interface functions. */
3311 static struct iface *
3312 iface_create(struct port *port, const struct ovsrec_interface *if_cfg)
3314 struct iface *iface;
3315 char *name = if_cfg->name;
3318 iface = xzalloc(sizeof *iface);
3320 iface->port_ifidx = port->n_ifaces;
3321 iface->name = xstrdup(name);
3322 iface->dp_ifidx = -1;
3323 iface->tag = tag_create_random();
3324 iface->delay_expires = LLONG_MAX;
3325 iface->netdev = NULL;
3327 if (port->n_ifaces >= port->allocated_ifaces) {
3328 port->ifaces = x2nrealloc(port->ifaces, &port->allocated_ifaces,
3329 sizeof *port->ifaces);
3331 port->ifaces[port->n_ifaces++] = iface;
3332 if (port->n_ifaces > 1) {
3333 port->bridge->has_bonded_ports = true;
3336 /* Attempt to create the network interface in case it
3337 * doesn't exist yet. */
3338 error = set_up_iface(if_cfg, true);
3340 VLOG_WARN("could not create iface %s: %s\n", iface->name,
3344 VLOG_DBG("attached network device %s to port %s", iface->name, port->name);
3346 bridge_flush(port->bridge);
3352 iface_destroy(struct iface *iface)
3355 struct port *port = iface->port;
3356 struct bridge *br = port->bridge;
3357 bool del_active = port->active_iface == iface->port_ifidx;
3360 if (iface->dp_ifidx >= 0) {
3361 port_array_set(&br->ifaces, iface->dp_ifidx, NULL);
3364 del = port->ifaces[iface->port_ifidx] = port->ifaces[--port->n_ifaces];
3365 del->port_ifidx = iface->port_ifidx;
3367 netdev_close(iface->netdev);
3370 ofproto_revalidate(port->bridge->ofproto, port->active_iface_tag);
3371 bond_choose_active_iface(port);
3372 bond_send_learning_packets(port);
3375 netdev_destroy(iface->name);
3379 bridge_flush(port->bridge);
3383 static struct iface *
3384 iface_lookup(const struct bridge *br, const char *name)
3388 for (i = 0; i < br->n_ports; i++) {
3389 struct port *port = br->ports[i];
3390 for (j = 0; j < port->n_ifaces; j++) {
3391 struct iface *iface = port->ifaces[j];
3392 if (!strcmp(iface->name, name)) {
3400 static struct iface *
3401 iface_from_dp_ifidx(const struct bridge *br, uint16_t dp_ifidx)
3403 return port_array_get(&br->ifaces, dp_ifidx);
3406 /* Returns true if 'iface' is the name of an "internal" interface on bridge
3407 * 'br', that is, an interface that is entirely simulated within the datapath.
3408 * The local port (ODPP_LOCAL) is always an internal interface. Other local
3409  * interfaces are created by setting the interface's 'type' to "internal".
3411 * In addition, we have a kluge-y feature that creates an internal port with
3412  * the name of a bonded port if its 'bond_fake_iface' setting is true.
3413 * This feature needs to go away in the long term. Until then, this is one
3414 * reason why this function takes a name instead of a struct iface: the fake
3415 * interfaces created this way do not have a struct iface. */
3417 iface_is_internal(const struct bridge *br, const char *if_name)
3419 /* XXX wastes time */
3420 struct iface *iface;
3423 if (!strcmp(if_name, br->name)) {
3427 iface = iface_lookup(br, if_name);
3428 if (iface && !strcmp(iface->cfg->type, "internal")) {
3432 port = port_lookup(br, if_name);
3433     if (port && port->n_ifaces > 1 && port->cfg->bond_fake_iface) {
3439 /* Set Ethernet address of 'iface', if one is specified in the configuration
3442 iface_set_mac(struct iface *iface)
3444 uint8_t ea[ETH_ADDR_LEN];
3446 if (iface->cfg->mac && eth_addr_from_string(iface->cfg->mac, ea)) {
3447 if (eth_addr_is_multicast(ea)) {
3448 VLOG_ERR("interface %s: cannot set MAC to multicast address",
3450 } else if (iface->dp_ifidx == ODPP_LOCAL) {
3451 VLOG_ERR("ignoring iface.%s.mac; use bridge.%s.mac instead",
3452 iface->name, iface->name);
3454 int error = netdev_set_etheraddr(iface->netdev, ea);
3456 VLOG_ERR("interface %s: setting MAC failed (%s)",
3457 iface->name, strerror(error));
3463 /* Port mirroring. */
3467 mirror_reconfigure(struct bridge *br)
3469 struct svec old_mirrors, new_mirrors;
3470 size_t i, n_rspan_vlans;
3471 unsigned long *rspan_vlans;
3473 /* Collect old and new mirrors. */
3474 svec_init(&old_mirrors);
3475 svec_init(&new_mirrors);
3476 cfg_get_subsections(&new_mirrors, "mirror.%s", br->name);
3477 for (i = 0; i < MAX_MIRRORS; i++) {
3478 if (br->mirrors[i]) {
3479 svec_add(&old_mirrors, br->mirrors[i]->name);
3483 /* Get rid of deleted mirrors and add new mirrors. */
3484 svec_sort(&old_mirrors);
3485 assert(svec_is_unique(&old_mirrors));
3486 svec_sort(&new_mirrors);
3487 assert(svec_is_unique(&new_mirrors));
3488 for (i = 0; i < MAX_MIRRORS; i++) {
3489 struct mirror *m = br->mirrors[i];
3490 if (m && !svec_contains(&new_mirrors, m->name)) {
3494 for (i = 0; i < new_mirrors.n; i++) {
3495 const char *name = new_mirrors.names[i];
3496 if (!svec_contains(&old_mirrors, name)) {
3497 mirror_create(br, name);
3500 svec_destroy(&old_mirrors);
3501 svec_destroy(&new_mirrors);
3503 /* Reconfigure all mirrors. */
3504 for (i = 0; i < MAX_MIRRORS; i++) {
3505 if (br->mirrors[i]) {
3506 mirror_reconfigure_one(br->mirrors[i]);
3510 /* Update port reserved status. */
3511 for (i = 0; i < br->n_ports; i++) {
3512 br->ports[i]->is_mirror_output_port = false;
3514 for (i = 0; i < MAX_MIRRORS; i++) {
3515 struct mirror *m = br->mirrors[i];
3516 if (m && m->out_port) {
3517 m->out_port->is_mirror_output_port = true;
3521 /* Update learning disabled vlans (for RSPAN). */
3523 n_rspan_vlans = cfg_count("vlan.%s.disable-learning", br->name);
3524 if (n_rspan_vlans) {
3525 rspan_vlans = bitmap_allocate(4096);
3527 for (i = 0; i < n_rspan_vlans; i++) {
3528 int vlan = cfg_get_vlan(i, "vlan.%s.disable-learning", br->name);
3530 bitmap_set1(rspan_vlans, vlan);
3531 VLOG_INFO("bridge %s: disabling learning on vlan %d\n",
3534 VLOG_ERR("bridge %s: invalid value '%s' for learning disabled "
3536 cfg_get_string(i, "vlan.%s.disable-learning", br->name));
3540 if (mac_learning_set_disabled_vlans(br->ml, rspan_vlans)) {
3546 mirror_create(struct bridge *br, const char *name)
3551 for (i = 0; ; i++) {
3552 if (i >= MAX_MIRRORS) {
3553 VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
3554 "cannot create %s", br->name, MAX_MIRRORS, name);
3557 if (!br->mirrors[i]) {
3562 VLOG_INFO("created port mirror %s on bridge %s", name, br->name);
3565 br->mirrors[i] = m = xzalloc(sizeof *m);
3568 m->name = xstrdup(name);
3569 svec_init(&m->src_ports);
3570 svec_init(&m->dst_ports);
3578 mirror_destroy(struct mirror *m)
3581 struct bridge *br = m->bridge;
3584 for (i = 0; i < br->n_ports; i++) {
3585 br->ports[i]->src_mirrors &= ~(MIRROR_MASK_C(1) << m->idx);
3586 br->ports[i]->dst_mirrors &= ~(MIRROR_MASK_C(1) << m->idx);
3589 svec_destroy(&m->src_ports);
3590 svec_destroy(&m->dst_ports);
3593 m->bridge->mirrors[m->idx] = NULL;
3601 prune_ports(struct mirror *m, struct svec *ports)
3606 svec_sort_unique(ports);
3609 for (i = 0; i < ports->n; i++) {
3610 const char *name = ports->names[i];
3611 if (port_lookup(m->bridge, name)) {
3612 svec_add(&tmp, name);
3614 VLOG_WARN("mirror.%s.%s: cannot match on nonexistent port %s",
3615 m->bridge->name, m->name, name);
3618 svec_swap(ports, &tmp);
3623 prune_vlans(struct mirror *m, struct svec *vlan_strings, int **vlans)
3627 /* This isn't perfect: it won't combine "0" and "00", and the textual sort
3628 * order won't give us numeric sort order. But that's good enough for what
3629 * we need right now. */
3630 svec_sort_unique(vlan_strings);
3632 *vlans = xmalloc(sizeof *vlans * vlan_strings->n);
3634 for (i = 0; i < vlan_strings->n; i++) {
3635 const char *name = vlan_strings->names[i];
3637 if (!str_to_int(name, 10, &vlan) || vlan < 0 || vlan > 4095) {
3638 VLOG_WARN("mirror.%s.%s.select.vlan: ignoring invalid VLAN %s",
3639 m->bridge->name, m->name, name);
3641 (*vlans)[n_vlans++] = vlan;
3648 vlan_is_mirrored(const struct mirror *m, int vlan)
3652 for (i = 0; i < m->n_vlans; i++) {
3653 if (m->vlans[i] == vlan) {
3661 port_trunks_any_mirrored_vlan(const struct mirror *m, const struct port *p)
3665 for (i = 0; i < m->n_vlans; i++) {
3666 if (port_trunks_vlan(p, m->vlans[i])) {
3674 mirror_reconfigure_one(struct mirror *m)
3676 char *pfx = xasprintf("mirror.%s.%s", m->bridge->name, m->name);
3677 struct svec src_ports, dst_ports, ports;
3678 struct svec vlan_strings;
3679 mirror_mask_t mirror_bit;
3680 const char *out_port_name;
3681 struct port *out_port;
3686 bool mirror_all_ports;
3687 bool any_ports_specified;
3689 /* Get output port. */
3690 out_port_name = cfg_get_key(0, "mirror.%s.%s.output.port",
3691 m->bridge->name, m->name);
3692 if (out_port_name) {
3693 out_port = port_lookup(m->bridge, out_port_name);
3695 VLOG_ERR("%s.output.port: bridge %s does not have a port "
3696 "named %s", pfx, m->bridge->name, out_port_name);
3703 if (cfg_has("%s.output.vlan", pfx)) {
3704 VLOG_ERR("%s.output.port and %s.output.vlan both specified; "
3705 "ignoring %s.output.vlan", pfx, pfx, pfx);
3707 } else if (cfg_has("%s.output.vlan", pfx)) {
3709 out_vlan = cfg_get_vlan(0, "%s.output.vlan", pfx);
3711 VLOG_ERR("%s: neither %s.output.port nor %s.output.vlan specified, "
3712 "but exactly one is required; disabling port mirror %s",
3713 pfx, pfx, pfx, pfx);
3719 /* Get all the ports, and drop duplicates and ports that don't exist. */
3720 svec_init(&src_ports);
3721 svec_init(&dst_ports);
3723 cfg_get_all_keys(&src_ports, "%s.select.src-port", pfx);
3724 cfg_get_all_keys(&dst_ports, "%s.select.dst-port", pfx);
3725 cfg_get_all_keys(&ports, "%s.select.port", pfx);
3726 any_ports_specified = src_ports.n || dst_ports.n || ports.n;
3727 svec_append(&src_ports, &ports);
3728 svec_append(&dst_ports, &ports);
3729 svec_destroy(&ports);
3730 prune_ports(m, &src_ports);
3731 prune_ports(m, &dst_ports);
3732 if (any_ports_specified && !src_ports.n && !dst_ports.n) {
3733 VLOG_ERR("%s: none of the specified ports exist; "
3734 "disabling port mirror %s", pfx, pfx);
3739 /* Get all the vlans, and drop duplicate and invalid vlans. */
3740 svec_init(&vlan_strings);
3741 cfg_get_all_keys(&vlan_strings, "%s.select.vlan", pfx);
3742 n_vlans = prune_vlans(m, &vlan_strings, &vlans);
3743 svec_destroy(&vlan_strings);
3745 /* Update mirror data. */
3746 if (!svec_equal(&m->src_ports, &src_ports)
3747 || !svec_equal(&m->dst_ports, &dst_ports)
3748 || m->n_vlans != n_vlans
3749 || memcmp(m->vlans, vlans, sizeof *vlans * n_vlans)
3750 || m->out_port != out_port
3751 || m->out_vlan != out_vlan) {
3752 bridge_flush(m->bridge);
3754 svec_swap(&m->src_ports, &src_ports);
3755 svec_swap(&m->dst_ports, &dst_ports);
3758 m->n_vlans = n_vlans;
3759 m->out_port = out_port;
3760 m->out_vlan = out_vlan;
3762 /* If no selection criteria have been given, mirror for all ports. */
3763 mirror_all_ports = (!m->src_ports.n) && (!m->dst_ports.n) && (!m->n_vlans);
3766 mirror_bit = MIRROR_MASK_C(1) << m->idx;
3767 for (i = 0; i < m->bridge->n_ports; i++) {
3768 struct port *port = m->bridge->ports[i];
3770 if (mirror_all_ports
3771 || svec_contains(&m->src_ports, port->name)
3774 ? port_trunks_any_mirrored_vlan(m, port)
3775 : vlan_is_mirrored(m, port->vlan)))) {
3776 port->src_mirrors |= mirror_bit;
3778 port->src_mirrors &= ~mirror_bit;
3781 if (mirror_all_ports || svec_contains(&m->dst_ports, port->name)) {
3782 port->dst_mirrors |= mirror_bit;
3784 port->dst_mirrors &= ~mirror_bit;
3790 svec_destroy(&src_ports);
3791 svec_destroy(&dst_ports);