+ if (VLOG_IS_DBG_ENABLED()) {
+ bool was_enabled = old->port_no != htons(OFPP_NONE);
+ bool now_enabled = new->port_no != htons(OFPP_NONE);
+ uint32_t curr = ntohl(new->curr);
+ uint32_t supported = ntohl(new->supported);
+ struct ds ds;
+
+ if (((old->config != new->config) || (old->state != new->state))
+ && opp_differs(old, new) == 1) {
+ /* Don't care if only flags changed. */
+ return;
+ }
+
+ ds_init(&ds);
+ ds_put_format(&ds, "\"%s\", "ETH_ADDR_FMT, new->name,
+ ETH_ADDR_ARGS(new->hw_addr));
+ if (curr) {
+ put_features(&ds, ", current", curr);
+ }
+ if (supported) {
+ put_features(&ds, ", supports", supported);
+ }
+ if (was_enabled != now_enabled) {
+ if (now_enabled) {
+ VLOG_DBG("Port %d added: %s", port_no, ds_cstr(&ds));
+ } else {
+ VLOG_DBG("Port %d deleted", port_no);
+ }
+ } else {
+ VLOG_DBG("Port %d changed: %s", port_no, ds_cstr(&ds));
+ }
+ ds_destroy(&ds);
+ }
+}
+
+static void
+port_watcher_register_callback(struct port_watcher *pw,
+ port_changed_cb_func *port_changed,
+ void *aux)
+{
+ assert(pw->n_cbs < ARRAY_SIZE(pw->cbs));
+ pw->cbs[pw->n_cbs].port_changed = port_changed;
+ pw->cbs[pw->n_cbs].aux = aux;
+ pw->n_cbs++;
+}
+
+static void
+port_watcher_register_local_port_callback(struct port_watcher *pw,
+ local_port_changed_cb_func *cb,
+ void *aux)
+{
+ assert(pw->n_local_cbs < ARRAY_SIZE(pw->local_cbs));
+ pw->local_cbs[pw->n_local_cbs].local_port_changed = cb;
+ pw->local_cbs[pw->n_local_cbs].aux = aux;
+ pw->n_local_cbs++;
+}
+
+static uint32_t
+port_watcher_get_config(const struct port_watcher *pw, int port_no)
+{
+ int idx = port_no_to_pw_idx(port_no);
+ return idx >= 0 ? ntohl(pw->ports[idx].config) : 0;
+}
+
/* Sets flags on port 'port_no' in 'pw': the bits of 'config' selected by
 * 'c_mask' replace the corresponding bits of the port's OpenFlow config, and
 * likewise 'state'/'s_mask' for the port's state.  All four values are in
 * host byte order.
 *
 * If nothing would change, does nothing.  Otherwise, invokes the registered
 * port-changed callbacks, sends an OFPT_PORT_MOD to the datapath to apply
 * the config change, and sends an OFPT_PORT_STATUS (OFPPR_MODIFY) to the
 * controller. */
static void
port_watcher_set_flags(struct port_watcher *pw, int port_no,
                       uint32_t config, uint32_t c_mask,
                       uint32_t state, uint32_t s_mask)
{
    struct ofp_phy_port old;
    struct ofp_phy_port *p;
    struct ofp_port_mod *opm;
    struct ofp_port_status *ops;
    struct ofpbuf *b;
    int idx;

    idx = port_no_to_pw_idx(port_no);
    if (idx < 0) {
        return;
    }

    /* Bail out early if none of the masked bits would actually change. */
    p = &pw->ports[idx];
    if (!((ntohl(p->state) ^ state) & s_mask)
        && (!((ntohl(p->config) ^ config) & c_mask))) {
        return;
    }
    old = *p;

    /* Update our idea of the flags.  (Stored copies are network byte
     * order.) */
    p->config = htonl((ntohl(p->config) & ~c_mask) | (config & c_mask));
    p->state = htonl((ntohl(p->state) & ~s_mask) | (state & s_mask));
    call_port_changed_callbacks(pw, port_no, &old, p);

    /* Change the flags in the datapath. */
    opm = make_openflow(sizeof *opm, OFPT_PORT_MOD, &b);
    opm->port_no = p->port_no;
    memcpy(opm->hw_addr, p->hw_addr, OFP_ETH_ALEN);
    opm->config = p->config;
    opm->mask = htonl(c_mask);
    opm->advertise = htonl(0);
    rconn_send(pw->local_rconn, b, NULL);

    /* Notify the controller that the flags changed. */
    ops = make_openflow(sizeof *ops, OFPT_PORT_STATUS, &b);
    ops->reason = OFPPR_MODIFY;
    ops->desc = *p;
    rconn_send(pw->remote_rconn, b, NULL);
}
+
/* Returns true once 'pw' has received the datapath's features reply, i.e.
 * once the set of ports and their flags is known. */
static bool
port_watcher_is_ready(const struct port_watcher *pw)
{
    return pw->got_feature_reply;
}
+
+static struct hook
+port_watcher_create(struct rconn *local_rconn, struct rconn *remote_rconn,
+ struct port_watcher **pwp)
+{
+ struct port_watcher *pw;
+ int i;
+
+ pw = *pwp = xcalloc(1, sizeof *pw);
+ pw->local_rconn = local_rconn;
+ pw->remote_rconn = remote_rconn;
+ pw->last_feature_request = TIME_MIN;
+ for (i = 0; i < OFPP_MAX; i++) {
+ pw->ports[i].port_no = htons(OFPP_NONE);
+ }
+ pw->local_port_name[0] = '\0';
+ port_watcher_register_callback(pw, log_port_status, NULL);
+ return make_hook(port_watcher_local_packet_cb,
+ port_watcher_remote_packet_cb,
+ port_watcher_periodic_cb,
+ port_watcher_wait_cb, pw);
+}
+\f
+/* Spanning tree protocol. */
+
+/* Extra time, in seconds, at boot before going into fail-open, to give the
+ * spanning tree protocol time to figure out the network layout. */
+#define STP_EXTRA_BOOT_TIME 30
+
/* State for the spanning tree protocol hook. */
struct stp_data {
    struct stp *stp;                /* The STP state machine. */
    struct port_watcher *pw;        /* Tracks datapath port flags/status. */
    struct rconn *local_rconn;      /* Connection to the local datapath. */
    struct rconn *remote_rconn;     /* Connection to the controller. */
    long long int last_tick_256ths; /* Time of last stp_tick(), in 1/256 s. */
    int n_txq;                      /* Queued BPDU transmissions (used as the
                                     * counter for rconn_send_with_limit()). */
};
+
+static bool
+stp_local_packet_cb(struct relay *r, void *stp_)
+{
+ struct ofpbuf *msg = r->halves[HALF_LOCAL].rxbuf;
+ struct ofp_header *oh;
+ struct stp_data *stp = stp_;
+ struct ofp_packet_in *opi;
+ struct eth_header *eth;
+ struct llc_header *llc;
+ struct ofpbuf payload;
+ uint16_t port_no;
+ struct flow flow;
+
+ oh = msg->data;
+ if (oh->type == OFPT_FEATURES_REPLY
+ && msg->size >= offsetof(struct ofp_switch_features, ports)) {
+ struct ofp_switch_features *osf = msg->data;
+ osf->capabilities |= htonl(OFPC_STP);
+ return false;
+ }
+
+ if (!get_ofp_packet_eth_header(r, &opi, ð)
+ || !eth_addr_equals(eth->eth_dst, stp_eth_addr)) {
+ return false;
+ }
+
+ port_no = ntohs(opi->in_port);
+ if (port_no >= STP_MAX_PORTS) {
+ /* STP only supports 255 ports. */
+ return false;
+ }
+ if (port_watcher_get_config(stp->pw, port_no) & OFPPC_NO_STP) {
+ /* We're not doing STP on this port. */
+ return false;
+ }
+
+ if (opi->reason == OFPR_ACTION) {
+ /* The controller set up a flow for this, so we won't intercept it. */
+ return false;
+ }
+
+ get_ofp_packet_payload(opi, &payload);
+ flow_extract(&payload, port_no, &flow);
+ if (flow.dl_type != htons(OFP_DL_TYPE_NOT_ETH_TYPE)) {
+ VLOG_DBG("non-LLC frame received on STP multicast address");
+ return false;
+ }
+ llc = ofpbuf_at_assert(&payload, sizeof *eth, sizeof *llc);
+ if (llc->llc_dsap != STP_LLC_DSAP) {
+ VLOG_DBG("bad DSAP 0x%02"PRIx8" received on STP multicast address",
+ llc->llc_dsap);
+ return false;
+ }
+
+ /* Trim off padding on payload. */
+ if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
+ payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
+ }
+ if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
+ struct stp_port *p = stp_get_port(stp->stp, port_no);
+ stp_received_bpdu(p, payload.data, payload.size);
+ }
+
+ return true;
+}
+
/* Returns the current time in units of 1/256 of a second, the granularity
 * used by the STP timers. */
static long long int
time_256ths(void)
{
    long long int msec = time_msec();
    return msec * 256 / 1000;
}
+
/* Hook "periodic" function for STP: advances the STP timers by the elapsed
 * time and pushes any resulting port state changes into the port watcher
 * (and thus to the datapath and the controller). */
static void
stp_periodic_cb(void *stp_)
{
    struct stp_data *stp = stp_;
    long long int now_256ths = time_256ths();
    long long int elapsed_256ths = now_256ths - stp->last_tick_256ths;
    struct stp_port *p;

    if (!port_watcher_is_ready(stp->pw)) {
        /* Can't start STP until we know port flags, because port flags can
         * disable STP. */
        return;
    }
    if (elapsed_256ths <= 0) {
        return;
    }

    stp_tick(stp->stp, MIN(INT_MAX, elapsed_256ths));
    stp->last_tick_256ths = now_256ths;

    /* Propagate every STP port state change since the last tick. */
    while (stp_get_changed_port(stp->stp, &p)) {
        int port_no = stp_port_no(p);
        enum stp_state s_state = stp_port_get_state(p);

        if (s_state != STP_DISABLED) {
            VLOG_WARN("STP: Port %d entered %s state",
                      port_no, stp_state_name(s_state));
        }
        if (!(port_watcher_get_config(stp->pw, port_no) & OFPPC_NO_STP)) {
            uint32_t p_config = 0;
            uint32_t p_state;
            /* Map the STP state onto the OpenFlow OFPPS_STP_* state. */
            switch (s_state) {
            case STP_LISTENING:
                p_state = OFPPS_STP_LISTEN;
                break;
            case STP_LEARNING:
                p_state = OFPPS_STP_LEARN;
                break;
            case STP_DISABLED:
            case STP_FORWARDING:
                /* NOTE(review): STP_DISABLED is reported as forwarding,
                 * presumably because OpenFlow defines no OFPPS_STP_* value
                 * for "disabled" -- confirm this is intentional. */
                p_state = OFPPS_STP_FORWARD;
                break;
            case STP_BLOCKING:
                p_state = OFPPS_STP_BLOCK;
                break;
            default:
                VLOG_DBG_RL(&vrl, "STP: Port %d has bad state %x",
                            port_no, s_state);
                p_state = OFPPS_STP_FORWARD;
                break;
            }
            if (!stp_forward_in_state(s_state)) {
                /* Keep the datapath from flooding out ports that STP has
                 * not put into a forwarding state. */
                p_config = OFPPC_NO_FLOOD;
            }
            port_watcher_set_flags(stp->pw, port_no,
                                   p_config, OFPPC_NO_FLOOD,
                                   p_state, OFPPS_STP_MASK);
        } else {
            /* We don't own those flags. */
        }
    }
}
+
/* Hook "wait" function for STP: wake up within one second so that
 * stp_periodic_cb() can advance the STP timers. */
static void
stp_wait_cb(void *stp_ UNUSED)
{
    poll_timer_wait(1000);
}
+
/* Callback for the STP module: transmits the 'bpdu_size'-byte BPDU in 'bpdu'
 * out OpenFlow port 'port_no', wrapping it in 802.2 Ethernet/LLC headers and
 * sending it to the datapath as a packet-out. */
static void
send_bpdu(const void *bpdu, size_t bpdu_size, int port_no, void *stp_)
{
    struct stp_data *stp = stp_;
    struct eth_header *eth;
    struct llc_header *llc;
    struct ofpbuf pkt, *opo;

    /* Packet skeleton. */
    ofpbuf_init(&pkt, ETH_HEADER_LEN + LLC_HEADER_LEN + bpdu_size);
    eth = ofpbuf_put_uninit(&pkt, sizeof *eth);
    llc = ofpbuf_put_uninit(&pkt, sizeof *llc);
    ofpbuf_put(&pkt, bpdu, bpdu_size);

    /* 802.2 header.  The source address is the sending port's MAC; the type
     * field carries the 802.3 frame length, not an Ethertype. */
    memcpy(eth->eth_dst, stp_eth_addr, ETH_ADDR_LEN);
    memcpy(eth->eth_src, stp->pw->ports[port_no].hw_addr, ETH_ADDR_LEN);
    eth->eth_type = htons(pkt.size - ETH_HEADER_LEN);

    /* LLC header. */
    llc->llc_dsap = STP_LLC_DSAP;
    llc->llc_ssap = STP_LLC_SSAP;
    llc->llc_cntl = STP_LLC_CNTL;

    /* NOTE(review): OFPP_MAX is used here as the transmit-queue limit for
     * rconn_send_with_limit(), not as a port number -- confirm intended. */
    opo = make_unbuffered_packet_out(&pkt, OFPP_NONE, port_no);
    ofpbuf_uninit(&pkt);
    rconn_send_with_limit(stp->local_rconn, opo, &stp->n_txq, OFPP_MAX);
}
+
/* Returns true if we can run STP on OpenFlow port 'port_no'.
 *
 * We should be able to support STP on all possible OpenFlow physical
 * ports.  (But we don't support STP on OFPP_LOCAL.) */
static bool
stp_is_port_supported(uint16_t port_no)
{
    /* Compile-time guarantee that the STP module has room for every
     * physical port. */
    BUILD_ASSERT_DECL(STP_MAX_PORTS >= OFPP_MAX);
    return port_no < STP_MAX_PORTS;
}
+
+static void
+stp_port_changed_cb(uint16_t port_no,
+ const struct ofp_phy_port *old,
+ const struct ofp_phy_port *new,
+ void *stp_)
+{
+ struct stp_data *stp = stp_;
+ struct stp_port *p;
+
+ if (!stp_is_port_supported(port_no)) {
+ return;
+ }
+
+ p = stp_get_port(stp->stp, port_no);
+ if (new->port_no == htons(OFPP_NONE)
+ || new->config & htonl(OFPPC_NO_STP | OFPPC_PORT_DOWN)
+ || new->state & htonl(OFPPS_LINK_DOWN)) {
+ stp_port_disable(p);
+ } else {
+ int speed = 0;
+ stp_port_enable(p);
+ if (new->curr & (OFPPF_10MB_HD | OFPPF_10MB_FD)) {
+ speed = 10;
+ } else if (new->curr & (OFPPF_100MB_HD | OFPPF_100MB_FD)) {
+ speed = 100;
+ } else if (new->curr & (OFPPF_1GB_HD | OFPPF_1GB_FD)) {
+ speed = 1000;
+ } else if (new->curr & OFPPF_100MB_FD) {
+ speed = 10000;
+ }
+ stp_port_set_speed(p, speed);
+ }
+}
+
+static void
+stp_local_port_changed_cb(const struct ofp_phy_port *port, void *stp_)
+{
+ struct stp_data *stp = stp_;
+ if (port) {
+ stp_set_bridge_id(stp->stp, eth_addr_to_uint64(port->hw_addr));
+ }
+}
+
/* Creates the spanning tree protocol hook.
 *
 * The bridge ID starts out random and is replaced by the local port's MAC
 * address once it is learned (see stp_local_port_changed_cb()).  Registers
 * for port-change notifications on 'pw' and returns the hook to install. */
static struct hook
stp_hook_create(const struct settings *s, struct port_watcher *pw,
                struct rconn *local, struct rconn *remote)
{
    uint8_t dpid[ETH_ADDR_LEN];
    struct stp_data *stp;

    stp = xcalloc(1, sizeof *stp);
    eth_addr_random(dpid);
    stp->stp = stp_create("stp", eth_addr_to_uint64(dpid), send_bpdu, stp);
    stp->pw = pw;
    stp->local_rconn = local;
    stp->remote_rconn = remote;
    stp->last_tick_256ths = time_256ths();

    port_watcher_register_callback(pw, stp_port_changed_cb, stp);
    port_watcher_register_local_port_callback(pw, stp_local_port_changed_cb,
                                              stp);
    return make_hook(stp_local_packet_cb, NULL,
                     stp_periodic_cb, stp_wait_cb, stp);
}
+\f
+/* In-band control. */
+
/* State for in-band control. */
struct in_band_data {
    const struct settings *s;   /* Secchan settings (e.g. max_idle). */
    struct mac_learning *ml;    /* MAC learning table for hand-switching. */
    struct netdev *of_device;   /* Local network device, or NULL if the
                                 * local port is not (yet) known. */
    struct rconn *controller;   /* Connection to the controller. */
    int n_queued;               /* Counter for queue_tx()'s send limit. */
};
+
/* Sends 'b' on 'rc', limiting the per-instance transmit backlog to 10
 * buffers (tracked in 'in_band->n_queued' by rconn_send_with_limit()). */
static void
queue_tx(struct rconn *rc, struct in_band_data *in_band, struct ofpbuf *b)
{
    rconn_send_with_limit(rc, b, &in_band->n_queued, 10);
}
+
/* Returns the controller's Ethernet address, or a null pointer if it is not
 * (yet) known.
 *
 * The result is cached in function-static storage and refreshed, via an ARP
 * table lookup on the local device, when the controller's IP changes or the
 * refresh timer expires.  NOTE(review): the static cache assumes a single
 * controller per process -- confirm that holds for all callers. */
static const uint8_t *
get_controller_mac(struct in_band_data *in_band)
{
    static uint32_t ip, last_nonzero_ip;
    static uint8_t mac[ETH_ADDR_LEN], last_nonzero_mac[ETH_ADDR_LEN];
    static time_t next_refresh = 0;

    uint32_t last_ip = ip;

    time_t now = time_now();

    ip = rconn_get_ip(in_band->controller);
    if (last_ip != ip || !next_refresh || now >= next_refresh) {
        bool have_mac;

        /* Look up MAC address.  On failure 'mac' stays all-zeros, which
         * doubles as the "unknown" marker below. */
        memset(mac, 0, sizeof mac);
        if (ip && in_band->of_device) {
            int retval = netdev_arp_lookup(in_band->of_device, ip, mac);
            if (retval) {
                VLOG_DBG_RL(&vrl, "cannot look up controller hw address "
                            "("IP_FMT"): %s", IP_ARGS(&ip), strerror(retval));
            }
        }
        have_mac = !eth_addr_is_zero(mac);

        /* Log changes in IP, MAC addresses. */
        if (ip && ip != last_nonzero_ip) {
            VLOG_DBG("controller IP address changed from "IP_FMT
                     " to "IP_FMT, IP_ARGS(&last_nonzero_ip), IP_ARGS(&ip));
            last_nonzero_ip = ip;
        }
        if (have_mac && memcmp(last_nonzero_mac, mac, ETH_ADDR_LEN)) {
            VLOG_DBG("controller MAC address changed from "ETH_ADDR_FMT" to "
                     ETH_ADDR_FMT,
                     ETH_ADDR_ARGS(last_nonzero_mac), ETH_ADDR_ARGS(mac));
            memcpy(last_nonzero_mac, mac, ETH_ADDR_LEN);
        }

        /* Schedule next refresh.
         *
         * If we have an IP address but not a MAC address, then refresh
         * quickly, since we probably will get a MAC address soon (via ARP).
         * Otherwise, we can afford to wait a little while. */
        next_refresh = now + (!ip || have_mac ? 10 : 1);
    }
    return !eth_addr_is_zero(mac) ? mac : NULL;
}
+
+static bool
+is_controller_mac(const uint8_t dl_addr[ETH_ADDR_LEN],
+ struct in_band_data *in_band)
+{
+ const uint8_t *mac = get_controller_mac(in_band);
+ return mac && eth_addr_equals(mac, dl_addr);
+}
+
/* Teaches the in-band MAC learning table that 'src_mac' was seen on
 * 'in_port', logging (rate-limited) when this changes what we knew. */
static void
in_band_learn_mac(struct in_band_data *in_band,
                  uint16_t in_port, const uint8_t src_mac[ETH_ADDR_LEN])
{
    if (mac_learning_learn(in_band->ml, src_mac, in_port)) {
        VLOG_DBG_RL(&vrl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
                    ETH_ADDR_ARGS(src_mac), in_port);
    }
}
+
+static bool
+in_band_local_packet_cb(struct relay *r, void *in_band_)
+{
+ struct in_band_data *in_band = in_band_;
+ struct rconn *rc = r->halves[HALF_LOCAL].rconn;
+ struct ofp_packet_in *opi;
+ struct eth_header *eth;
+ struct ofpbuf payload;
+ struct flow flow;
+ uint16_t in_port;
+ int out_port;
+
+ if (!get_ofp_packet_eth_header(r, &opi, ð) || !in_band->of_device) {
+ return false;
+ }
+ in_port = ntohs(opi->in_port);
+
+ /* Deal with local stuff. */
+ if (in_port == OFPP_LOCAL) {
+ /* Sent by secure channel. */
+ out_port = mac_learning_lookup(in_band->ml, eth->eth_dst);
+ } else if (eth_addr_equals(eth->eth_dst,
+ netdev_get_etheraddr(in_band->of_device))) {
+ /* Sent to secure channel. */
+ out_port = OFPP_LOCAL;
+ in_band_learn_mac(in_band, in_port, eth->eth_src);
+ } else if (eth->eth_type == htons(ETH_TYPE_ARP)
+ && eth_addr_is_broadcast(eth->eth_dst)
+ && is_controller_mac(eth->eth_src, in_band)) {
+ /* ARP sent by controller. */
+ out_port = OFPP_FLOOD;
+ } else if (is_controller_mac(eth->eth_dst, in_band)
+ || is_controller_mac(eth->eth_src, in_band)) {
+ /* Traffic to or from controller. Switch it by hand. */
+ in_band_learn_mac(in_band, in_port, eth->eth_src);
+ out_port = mac_learning_lookup(in_band->ml, eth->eth_dst);
+ } else {
+ const uint8_t *controller_mac;
+ controller_mac = get_controller_mac(in_band);
+ if (eth->eth_type == htons(ETH_TYPE_ARP)
+ && eth_addr_is_broadcast(eth->eth_dst)
+ && is_controller_mac(eth->eth_src, in_band)) {
+ /* ARP sent by controller. */
+ out_port = OFPP_FLOOD;
+ } else if (is_controller_mac(eth->eth_dst, in_band)
+ && in_port == mac_learning_lookup(in_band->ml,
+ controller_mac)) {
+ /* Drop controller traffic that arrives on the controller port. */
+ out_port = -1;
+ } else {
+ return false;
+ }
+ }
+
+ get_ofp_packet_payload(opi, &payload);
+ flow_extract(&payload, in_port, &flow);
+ if (in_port == out_port) {
+ /* The input and output port match. Set up a flow to drop packets. */
+ queue_tx(rc, in_band, make_add_flow(&flow, ntohl(opi->buffer_id),
+ in_band->s->max_idle, 0));
+ } else if (out_port != OFPP_FLOOD) {
+ /* The output port is known, so add a new flow. */
+ queue_tx(rc, in_band,
+ make_add_simple_flow(&flow, ntohl(opi->buffer_id),
+ out_port, in_band->s->max_idle));
+
+ /* If the switch didn't buffer the packet, we need to send a copy. */
+ if (ntohl(opi->buffer_id) == UINT32_MAX) {
+ queue_tx(rc, in_band,
+ make_unbuffered_packet_out(&payload, in_port, out_port));
+ }
+ } else {
+ /* We don't know that MAC. Send along the packet without setting up a
+ * flow. */
+ struct ofpbuf *b;
+ if (ntohl(opi->buffer_id) == UINT32_MAX) {
+ b = make_unbuffered_packet_out(&payload, in_port, out_port);
+ } else {
+ b = make_buffered_packet_out(ntohl(opi->buffer_id),
+ in_port, out_port);
+ }
+ queue_tx(rc, in_band, b);
+ }
+ return true;
+}
+
/* Switch-status callback: reports in-band control status (local and
 * controller IP/MAC addresses) into 'sr'.  Reports nothing when the local
 * device is not yet known. */
static void
in_band_status_cb(struct status_reply *sr, void *in_band_)
{
    struct in_band_data *in_band = in_band_;
    struct in_addr local_ip;
    uint32_t controller_ip;
    const uint8_t *controller_mac;

    if (in_band->of_device) {
        const uint8_t *mac = netdev_get_etheraddr(in_band->of_device);
        if (netdev_get_in4(in_band->of_device, &local_ip)) {
            status_reply_put(sr, "local-ip="IP_FMT, IP_ARGS(&local_ip.s_addr));
        }
        status_reply_put(sr, "local-mac="ETH_ADDR_FMT, ETH_ADDR_ARGS(mac));

        controller_ip = rconn_get_ip(in_band->controller);
        if (controller_ip) {
            status_reply_put(sr, "controller-ip="IP_FMT,
                             IP_ARGS(&controller_ip));
        }
        controller_mac = get_controller_mac(in_band);
        if (controller_mac) {
            status_reply_put(sr, "controller-mac="ETH_ADDR_FMT,
                             ETH_ADDR_ARGS(controller_mac));
        }
    }
}
+
+static void
+get_ofp_packet_payload(struct ofp_packet_in *opi, struct ofpbuf *payload)
+{
+ payload->data = opi->data;
+ payload->size = ntohs(opi->header.length) - offsetof(struct ofp_packet_in,
+ data);
+}
+
+static void
+in_band_local_port_cb(const struct ofp_phy_port *port, void *in_band_)
+{
+ struct in_band_data *in_band = in_band_;
+ if (port) {
+ char name[sizeof port->name + 1];
+ get_port_name(port, name, sizeof name);
+
+ if (!in_band->of_device
+ || strcmp(netdev_get_name(in_band->of_device), name))
+ {
+ int error;
+ netdev_close(in_band->of_device);
+ error = netdev_open(name, NETDEV_ETH_TYPE_NONE,
+ &in_band->of_device);
+ if (error) {
+ VLOG_ERR("failed to open in-band control network device "
+ "\"%s\": %s", name, strerror(errno));
+ }
+ }
+ } else {
+ netdev_close(in_band->of_device);
+ in_band->of_device = NULL;
+ }
+}
+
+static struct hook
+in_band_hook_create(const struct settings *s, struct switch_status *ss,
+ struct port_watcher *pw, struct rconn *remote)
+{
+ struct in_band_data *in_band;
+
+ in_band = xcalloc(1, sizeof *in_band);
+ in_band->s = s;
+ in_band->ml = mac_learning_create();
+ in_band->of_device = NULL;
+ in_band->controller = remote;
+ switch_status_register_category(ss, "in-band", in_band_status_cb, in_band);
+ port_watcher_register_local_port_callback(pw, in_band_local_port_cb,
+ in_band);
+ return make_hook(in_band_local_packet_cb, NULL, NULL, NULL, in_band);
+}
+\f
+/* Fail open support. */
+
/* State for fail-open mode. */
struct fail_open_data {
    const struct settings *s;   /* Secchan settings (probe interval, etc.). */
    struct rconn *local_rconn;  /* Connection to the local datapath. */
    struct rconn *remote_rconn; /* Connection to the controller. */
    struct lswitch *lswitch;    /* Standalone learning switch while in
                                 * fail-open mode, otherwise NULL. */
    int last_disconn_secs;      /* Last reported disconnection duration. */
    time_t boot_deadline;       /* Don't fail open before this time. */
};
+
/* Periodic hook: enters or leaves fail-open mode, as appropriate.
 *
 * Fail-open is entered when the controller has been unreachable for at least
 * three probe intervals (and the boot deadline has passed); a standalone
 * learning switch then takes over.  It is left as soon as the controller
 * reconnects.  'lswitch != NULL' is the "currently failed open" flag. */
static void
fail_open_periodic_cb(void *fail_open_)
{
    struct fail_open_data *fail_open = fail_open_;
    int disconn_secs;
    bool open;

    if (time_now() < fail_open->boot_deadline) {
        return;
    }
    disconn_secs = rconn_failure_duration(fail_open->remote_rconn);
    open = disconn_secs >= fail_open->s->probe_interval * 3;
    if (open != (fail_open->lswitch != NULL)) {
        /* Transition between modes. */
        if (!open) {
            VLOG_WARN("No longer in fail-open mode");
            lswitch_destroy(fail_open->lswitch);
            fail_open->lswitch = NULL;
        } else {
            VLOG_WARN("Could not connect to controller for %d seconds, "
                      "failing open", disconn_secs);
            fail_open->lswitch = lswitch_create(fail_open->local_rconn, true,
                                                fail_open->s->max_idle);
            fail_open->last_disconn_secs = disconn_secs;
        }
    } else if (open && disconn_secs > fail_open->last_disconn_secs + 60) {
        /* Still failed open; remind the operator once a minute. */
        VLOG_WARN("Still in fail-open mode after %d seconds disconnected "
                  "from controller", disconn_secs);
        fail_open->last_disconn_secs = disconn_secs;
    }
}
+
+static bool
+fail_open_local_packet_cb(struct relay *r, void *fail_open_)
+{
+ struct fail_open_data *fail_open = fail_open_;
+ if (!fail_open->lswitch) {
+ return false;
+ } else {
+ lswitch_process_packet(fail_open->lswitch, fail_open->local_rconn,
+ r->halves[HALF_LOCAL].rxbuf);
+ rconn_run(fail_open->local_rconn);
+ return true;
+ }
+}
+
+static void
+fail_open_status_cb(struct status_reply *sr, void *fail_open_)
+{
+ struct fail_open_data *fail_open = fail_open_;
+ const struct settings *s = fail_open->s;
+ int trigger_duration = s->probe_interval * 3;
+ int cur_duration = rconn_failure_duration(fail_open->remote_rconn);
+
+ status_reply_put(sr, "trigger-duration=%d", trigger_duration);
+ status_reply_put(sr, "current-duration=%d", cur_duration);
+ status_reply_put(sr, "triggered=%s",
+ cur_duration >= trigger_duration ? "true" : "false");
+ status_reply_put(sr, "max-idle=%d", s->max_idle);
+}
+
/* Creates the fail-open hook.
 *
 * The boot deadline delays fail-open at startup (three probe intervals, plus
 * extra time when STP is enabled so the spanning tree can converge first).
 * Returns the hook to install. */
static struct hook
fail_open_hook_create(const struct settings *s, struct switch_status *ss,
                      struct rconn *local_rconn, struct rconn *remote_rconn)
{
    struct fail_open_data *fail_open = xmalloc(sizeof *fail_open);
    fail_open->s = s;
    fail_open->local_rconn = local_rconn;
    fail_open->remote_rconn = remote_rconn;
    fail_open->lswitch = NULL;
    fail_open->boot_deadline = time_now() + s->probe_interval * 3;
    if (s->enable_stp) {
        fail_open->boot_deadline += STP_EXTRA_BOOT_TIME;
    }
    switch_status_register_category(ss, "fail-open",
                                    fail_open_status_cb, fail_open);
    return make_hook(fail_open_local_packet_cb, NULL,
                     fail_open_periodic_cb, NULL, fail_open);
}
+\f
+struct rate_limiter {