X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=ofproto%2Fconnmgr.c;h=7e7aaae3d153d1617888722f31fbe211e5143dd1;hb=be44585c21cc8dbe097a460a930696f0350dc12d;hp=95cc43d1fabf2ca350f70fcef4daa637fb0b125b;hpb=2e2dd32bf7e78fedc6eec6f2d28a98e4781a24e4;p=openvswitch

diff --git a/ofproto/connmgr.c b/ofproto/connmgr.c
index 95cc43d1..7e7aaae3 100644
--- a/ofproto/connmgr.c
+++ b/ofproto/connmgr.c
@@ -22,7 +22,6 @@
 #include <stdlib.h>
 
 #include "coverage.h"
-#include "dpif.h"
 #include "fail-open.h"
 #include "in-band.h"
 #include "odp-util.h"
@@ -31,6 +30,7 @@
 #include "pinsched.h"
 #include "poll-loop.h"
 #include "pktbuf.h"
+#include "private.h"
 #include "rconn.h"
 #include "shash.h"
 #include "timeval.h"
@@ -49,6 +49,7 @@ struct ofconn {
     struct rconn *rconn;        /* OpenFlow connection. */
     enum ofconn_type type;      /* Type. */
     enum nx_flow_format flow_format; /* Currently selected flow format. */
+    bool flow_mod_table_id;     /* NXT_FLOW_MOD_TABLE_ID enabled? */
 
     /* OFPT_PACKET_IN related data. */
     struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
@@ -319,7 +320,7 @@ static struct ofconn *find_controller_by_target(struct connmgr *,
                                                 const char *target);
 static void update_fail_open(struct connmgr *);
 static int set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
-                       const struct svec *);
+                       const struct sset *);
 
 /* Returns true if 'mgr' has any configured primary controllers.
  *
@@ -390,10 +391,10 @@ connmgr_set_controllers(struct connmgr *mgr,
                         const struct ofproto_controller *controllers,
                         size_t n_controllers)
 {
+    bool had_controllers = connmgr_has_controllers(mgr);
     struct shash new_controllers;
     struct ofconn *ofconn, *next_ofconn;
     struct ofservice *ofservice, *next_ofservice;
-    bool ss_exists;
     size_t i;
 
     /* Create newly configured controllers and services.
@@ -421,7 +422,6 @@ connmgr_set_controllers(struct connmgr *mgr,
 
     /* Delete controllers that are no longer configured.
      * Update configuration of all now-existing controllers. */
-    ss_exists = false;
     HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &mgr->controllers) {
         struct ofproto_controller *c;
 
@@ -451,6 +451,9 @@ connmgr_set_controllers(struct connmgr *mgr,
 
     update_in_band_remotes(mgr);
     update_fail_open(mgr);
+    if (had_controllers != connmgr_has_controllers(mgr)) {
+        ofproto_flush_flows(mgr->ofproto);
+    }
 }
 
 /* Drops the connections between 'mgr' and all of its primary and secondary
@@ -470,22 +473,29 @@ connmgr_reconnect(const struct connmgr *mgr)
  * A "snoop" is a pvconn to which every OpenFlow message to or from the most
  * important controller on 'mgr' is mirrored. */
 int
-connmgr_set_snoops(struct connmgr *mgr, const struct svec *snoops)
+connmgr_set_snoops(struct connmgr *mgr, const struct sset *snoops)
 {
     return set_pvconns(&mgr->snoops, &mgr->n_snoops, snoops);
 }
 
 /* Adds each of the snoops currently configured on 'mgr' to 'snoops'. */
 void
-connmgr_get_snoops(const struct connmgr *mgr, struct svec *snoops)
+connmgr_get_snoops(const struct connmgr *mgr, struct sset *snoops)
 {
     size_t i;
 
     for (i = 0; i < mgr->n_snoops; i++) {
-        svec_add(snoops, pvconn_get_name(mgr->snoops[i]));
+        sset_add(snoops, pvconn_get_name(mgr->snoops[i]));
     }
 }
 
+/* Returns true if 'mgr' has at least one snoop, false if it has none. */
+bool
+connmgr_has_snoops(const struct connmgr *mgr)
+{
+    return mgr->n_snoops > 0;
+}
+
 /* Creates a new controller for 'target' in 'mgr'. update_controller() needs
  * to be called later to finish the new ofconn's configuration. */
 static void
@@ -583,10 +593,11 @@ update_fail_open(struct connmgr *mgr)
 
 static int
 set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
-            const struct svec *svec)
+            const struct sset *sset)
 {
     struct pvconn **pvconns = *pvconnsp;
     size_t n_pvconns = *n_pvconnsp;
+    const char *name;
     int retval = 0;
     size_t i;
 
@@ -595,10 +606,9 @@ set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
     }
     free(pvconns);
 
-    pvconns = xmalloc(svec->n * sizeof *pvconns);
+    pvconns = xmalloc(sset_count(sset) * sizeof *pvconns);
     n_pvconns = 0;
-    for (i = 0; i < svec->n; i++) {
-        const char *name = svec->names[i];
+    SSET_FOR_EACH (name, sset) {
         struct pvconn *pvconn;
         int error;
 
@@ -713,6 +723,24 @@ ofconn_set_flow_format(struct ofconn *ofconn, enum nx_flow_format flow_format)
     ofconn->flow_format = flow_format;
 }
 
+/* Returns true if the NXT_FLOW_MOD_TABLE_ID extension is enabled, false
+ * otherwise.
+ *
+ * By default the extension is not enabled. */
+bool
+ofconn_get_flow_mod_table_id(const struct ofconn *ofconn)
+{
+    return ofconn->flow_mod_table_id;
+}
+
+/* Enables or disables (according to 'enable') the NXT_FLOW_MOD_TABLE_ID
+ * extension on 'ofconn'. */
+void
+ofconn_set_flow_mod_table_id(struct ofconn *ofconn, bool enable)
+{
+    ofconn->flow_mod_table_id = enable;
+}
+
 /* Returns the default miss send length for 'ofconn'. */
 int
 ofconn_get_miss_send_len(const struct ofconn *ofconn)
@@ -762,6 +790,7 @@ ofconn_create(struct connmgr *mgr, struct rconn *rconn, enum ofconn_type type)
     ofconn->rconn = rconn;
     ofconn->type = type;
     ofconn->flow_format = NXFF_OPENFLOW10;
+    ofconn->flow_mod_table_id = false;
     ofconn->role = NX_ROLE_OTHER;
     ofconn->packet_in_counter = rconn_packet_counter_create ();
     ofconn->pktbuf = NULL;
@@ -914,13 +943,11 @@ ofconn_send(const struct ofconn *ofconn, struct ofpbuf *msg,
 
 /* Sending asynchronous messages. */
 
-static void schedule_packet_in(struct ofconn *, const struct dpif_upcall *,
+static void schedule_packet_in(struct ofconn *, struct ofputil_packet_in,
                                const struct flow *, struct ofpbuf *rw_packet);
 
 /* Sends an OFPT_PORT_STATUS message with 'opp' and 'reason' to appropriate
- * controllers managed by 'mgr'.
- *
- * 'opp' is in *HOST* byte order. */
+ * controllers managed by 'mgr'. */
 void
 connmgr_send_port_status(struct connmgr *mgr, const struct ofp_phy_port *opp,
                          uint8_t reason)
@@ -942,7 +969,6 @@ connmgr_send_port_status(struct connmgr *mgr, const struct ofp_phy_port *opp,
         ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
         ops->reason = reason;
         ops->desc = *opp;
-        hton_ofp_phy_port(&ops->desc);
         ofconn_send(ofconn, b, NULL);
     }
 }
@@ -972,15 +998,15 @@ connmgr_send_flow_removed(struct connmgr *mgr,
     }
 }
 
-/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an
- * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to
- * their individual configurations.
+/* Given 'pin', sends an OFPT_PACKET_IN message to each OpenFlow controller as
+ * necessary according to their individual configurations.
  *
  * 'rw_packet' may be NULL. Otherwise, 'rw_packet' must contain the same data
- * as upcall->packet. (rw_packet == upcall->packet is also valid.) Ownership
- * of 'rw_packet' is transferred to this function. */
+ * as pin->packet. (rw_packet == pin->packet is also valid.) Ownership of
+ * 'rw_packet' is transferred to this function. */
 void
-connmgr_send_packet_in(struct connmgr *mgr, const struct dpif_upcall *upcall,
+connmgr_send_packet_in(struct connmgr *mgr,
+                       const struct ofputil_packet_in *pin,
                        const struct flow *flow, struct ofpbuf *rw_packet)
 {
     struct ofconn *ofconn, *prev;
@@ -989,13 +1015,13 @@ connmgr_send_packet_in(struct connmgr *mgr, const struct dpif_upcall *upcall,
     LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
         if (ofconn_receives_async_msgs(ofconn)) {
             if (prev) {
-                schedule_packet_in(prev, upcall, flow, NULL);
+                schedule_packet_in(prev, *pin, flow, NULL);
             }
             prev = ofconn;
         }
     }
     if (prev) {
-        schedule_packet_in(prev, upcall, flow, rw_packet);
+        schedule_packet_in(prev, *pin, flow, rw_packet);
     } else {
         ofpbuf_delete(rw_packet);
     }
@@ -1011,50 +1037,45 @@ do_send_packet_in(struct ofpbuf *ofp_packet_in, void *ofconn_)
                           ofconn->packet_in_counter, 100);
 }
 
-/* Takes 'upcall', whose packet has the flow specified by 'flow', composes an
+/* Takes 'pin', whose packet has the flow specified by 'flow', composes an
  * OpenFlow packet-in message from it, and passes it to 'ofconn''s packet
  * scheduler for sending.
  *
  * 'rw_packet' may be NULL. Otherwise, 'rw_packet' must contain the same data
- * as upcall->packet. (rw_packet == upcall->packet is also valid.) Ownership
- * of 'rw_packet' is transferred to this function. */
+ * as pin->packet. (rw_packet == pin->packet is also valid.) Ownership of
+ * 'rw_packet' is transferred to this function. */
 static void
-schedule_packet_in(struct ofconn *ofconn, const struct dpif_upcall *upcall,
+schedule_packet_in(struct ofconn *ofconn, struct ofputil_packet_in pin,
                    const struct flow *flow, struct ofpbuf *rw_packet)
 {
     struct connmgr *mgr = ofconn->connmgr;
-    struct ofputil_packet_in pin;
-
-    /* Figure out the easy parts. */
-    pin.packet = upcall->packet;
-    pin.in_port = odp_port_to_ofp_port(flow->in_port);
-    pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
 
     /* Get OpenFlow buffer_id. */
-    if (upcall->type == DPIF_UC_ACTION) {
+    if (pin.reason == OFPR_ACTION) {
         pin.buffer_id = UINT32_MAX;
     } else if (mgr->fail_open && fail_open_is_active(mgr->fail_open)) {
         pin.buffer_id = pktbuf_get_null();
     } else if (!ofconn->pktbuf) {
         pin.buffer_id = UINT32_MAX;
     } else {
-        pin.buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet,
-                                    flow->in_port);
+        pin.buffer_id = pktbuf_save(ofconn->pktbuf, pin.packet, flow->in_port);
     }
 
     /* Figure out how much of the packet to send. */
-    pin.send_len = upcall->packet->size;
+    if (pin.reason == OFPR_NO_MATCH) {
+        pin.send_len = pin.packet->size;
+    } else {
+        /* Caller should have initialized 'send_len' to 'max_len' specified in
+         * struct ofp_action_output. */
+    }
     if (pin.buffer_id != UINT32_MAX) {
         pin.send_len = MIN(pin.send_len, ofconn->miss_send_len);
     }
-    if (upcall->type == DPIF_UC_ACTION) {
-        pin.send_len = MIN(pin.send_len, upcall->userdata);
-    }
 
     /* Make OFPT_PACKET_IN and hand over to packet scheduler. It might
     * immediately call into do_send_packet_in() or it might buffer it for a
     * while (until a later call to pinsched_run()). */
-    pinsched_send(ofconn->schedulers[upcall->type == DPIF_UC_MISS ? 0 : 1],
+    pinsched_send(ofconn->schedulers[pin.reason == OFPR_NO_MATCH ? 0 : 1],
                   flow->in_port, ofputil_encode_packet_in(&pin, rw_packet),
                   do_send_packet_in, ofconn);
 }
@@ -1074,8 +1095,13 @@ connmgr_get_fail_mode(const struct connmgr *mgr)
 void
 connmgr_set_fail_mode(struct connmgr *mgr, enum ofproto_fail_mode fail_mode)
 {
-    mgr->fail_mode = fail_mode;
-    update_fail_open(mgr);
+    if (mgr->fail_mode != fail_mode) {
+        mgr->fail_mode = fail_mode;
+        update_fail_open(mgr);
+        if (!connmgr_has_controllers(mgr)) {
+            ofproto_flush_flows(mgr->ofproto);
+        }
+    }
 }
 
 /* Fail-open implementation. */
@@ -1258,6 +1284,23 @@ connmgr_flushed(struct connmgr *mgr)
     if (mgr->fail_open) {
         fail_open_flushed(mgr->fail_open);
     }
+
+    /* If there are no controllers and we're in standalone mode, set up a flow
+     * that matches every packet and directs them to OFPP_NORMAL (which goes to
+     * us). Otherwise, the switch is in secure mode and we won't pass any
+     * traffic until a controller has been defined and it tells us to do so. */
+    if (!connmgr_has_controllers(mgr)
+        && mgr->fail_mode == OFPROTO_FAIL_STANDALONE) {
+        union ofp_action action;
+        struct cls_rule rule;
+
+        memset(&action, 0, sizeof action);
+        action.type = htons(OFPAT_OUTPUT);
+        action.output.len = htons(sizeof action);
+        action.output.port = htons(OFPP_NORMAL);
+        cls_rule_init_catchall(&rule, 0);
+        ofproto_add_flow(mgr->ofproto, &rule, &action, 1);
+    }
 }
 
 /* Creates a new ofservice for 'target' in 'mgr'. Returns 0 if successful,