X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=069d5e5184630ef21c4aaca7a0c1af84ed3f476e;hb=9d82ec478d52edfddd215dff1b0659ed7508b365;hp=2763efbd010daac0eabdb6935998bc8d97066d18;hpb=43253595291318833572088595769b45a79c9c54;p=openvswitch

diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index 2763efbd..069d5e51 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -19,6 +19,7 @@
 #include "ofproto.h"
 #include <errno.h>
 #include <inttypes.h>
+#include <limits.h>
 #include <net/if.h>
 #include <netinet/in.h>
 #include <stdbool.h>
@@ -223,6 +224,8 @@ static struct ofconn *ofconn_create(struct ofproto *, struct rconn *,
                                    enum ofconn_type);
 static void ofconn_destroy(struct ofconn *);
 static void ofconn_run(struct ofconn *, struct ofproto *);
 static void ofconn_wait(struct ofconn *);
+static bool ofconn_receives_async_msgs(const struct ofconn *);
+
 static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
                      struct rconn_packet_counter *counter);
@@ -961,25 +964,47 @@ process_port_change(struct ofproto *ofproto, int error, char *devname)
     }
 }
 
+/* Returns a "preference level" for snooping 'ofconn'.  A higher return value
+ * means that 'ofconn' is more interesting for monitoring than a lower return
+ * value. */
+static int
+snoop_preference(const struct ofconn *ofconn)
+{
+    switch (ofconn->role) {
+    case NX_ROLE_MASTER:
+        return 3;
+    case NX_ROLE_OTHER:
+        return 2;
+    case NX_ROLE_SLAVE:
+        return 1;
+    default:
+        /* Shouldn't happen. */
+        return 0;
+    }
+}
+
 /* One of ofproto's "snoop" pvconns has accepted a new connection on 'vconn'.
  * Connects this vconn to a controller. */
 static void
 add_snooper(struct ofproto *ofproto, struct vconn *vconn)
 {
-    struct ofconn *ofconn;
+    struct ofconn *ofconn, *best;
 
-    /* Arbitrarily pick the first controller in the list for monitoring.  We
-     * could do something smarter or more flexible later, if it ever proves
-     * useful. */
+    /* Pick a controller for monitoring. */
+    best = NULL;
     LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
-        if (ofconn->type == OFCONN_CONTROLLER) {
-            rconn_add_monitor(ofconn->rconn, vconn);
-            return;
+        if (ofconn->type == OFCONN_CONTROLLER
+            && (!best || snoop_preference(ofconn) > snoop_preference(best))) {
+            best = ofconn;
         }
+    }
+
+    if (best) {
+        rconn_add_monitor(best->rconn, vconn);
+    } else {
+        VLOG_INFO_RL(&rl, "no controller connection to snoop");
+        vconn_close(vconn);
     }
-
-    VLOG_INFO_RL(&rl, "no controller connection to monitor");
-    vconn_close(vconn);
 }
 
 int
@@ -1131,7 +1156,7 @@ ofproto_wait(struct ofproto *p)
         ofconn_wait(ofconn);
     }
     if (p->in_band) {
-        poll_timer_wait(p->next_in_band_update - time_msec());
+        poll_timer_wait_until(p->next_in_band_update);
         in_band_wait(p->in_band);
     }
     if (p->fail_open) {
@@ -1148,7 +1173,7 @@ ofproto_wait(struct ofproto *p)
         VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
         poll_immediate_wake();
     } else if (p->next_expiration != LLONG_MAX) {
-        poll_timer_wait(p->next_expiration - time_msec());
+        poll_timer_wait_until(p->next_expiration);
     }
     for (i = 0; i < p->n_listeners; i++) {
         pvconn_wait(p->listeners[i]);
@@ -1402,7 +1427,7 @@ send_port_status(struct ofproto *p, const struct ofport *ofport,
         struct ofp_port_status *ops;
         struct ofpbuf *b;
 
-        if (ofconn->role == NX_ROLE_SLAVE) {
+        if (!ofconn_receives_async_msgs(ofconn)) {
             continue;
         }
 
@@ -1651,6 +1676,22 @@ ofconn_wait(struct ofconn *ofconn)
         COVERAGE_INC(ofproto_ofconn_stuck);
     }
 }
+
+/* Returns true if 'ofconn' should receive asynchronous messages. */
+static bool
+ofconn_receives_async_msgs(const struct ofconn *ofconn)
+{
+    if (ofconn->type == OFCONN_CONTROLLER) {
+        /* Ordinary controllers always get asynchronous messages unless they
+         * have configured themselves as "slaves". */
+        return ofconn->role != NX_ROLE_SLAVE;
+    } else {
+        /* Transient connections don't get asynchronous messages unless they
+         * have explicitly asked for them by setting a nonzero miss send
+         * length. */
+        return ofconn->miss_send_len > 0;
+    }
+}
 
 /* Caller is responsible for initializing the 'cr' member of the returned
  * rule. */
@@ -2205,7 +2246,7 @@ add_controller_action(struct odp_actions *actions,
                       const struct ofp_action_output *oao)
 {
     union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER);
-    a->controller.arg = oao->max_len ? ntohs(oao->max_len) : UINT32_MAX;
+    a->controller.arg = ntohs(oao->max_len);
 }
 
 struct action_xlate_ctx {
@@ -3500,7 +3541,7 @@ handle_role_request(struct ofproto *ofproto,
     uint32_t role;
 
     if (ntohs(msg->header.length) != sizeof *nrr) {
-        VLOG_WARN_RL(&rl, "received role request of length %zu (expected %zu)",
+        VLOG_WARN_RL(&rl, "received role request of length %u (expected %zu)",
                      ntohs(msg->header.length), sizeof *nrr);
         return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
     }
@@ -3550,7 +3591,7 @@ handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg)
     struct nicira_header *nh;
 
     if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
-        VLOG_WARN_RL(&rl, "received vendor message of length %zu "
+        VLOG_WARN_RL(&rl, "received vendor message of length %u "
                      "(expected at least %zu)",
                      ntohs(ovh->header.length), sizeof(struct ofp_vendor_header));
         return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
@@ -3559,7 +3600,7 @@ handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg)
         return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
     }
     if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
-        VLOG_WARN_RL(&rl, "received Nicira vendor message of length %zu "
+        VLOG_WARN_RL(&rl, "received Nicira vendor message of length %u "
                      "(expected at least %zu)",
                      ntohs(ovh->header.length), sizeof(struct nicira_header));
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
@@ -3864,7 +3905,7 @@ send_flow_removed(struct ofproto *p, struct rule *rule,
     prev = NULL;
     LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
         if (rule->send_flow_removed && rconn_is_connected(ofconn->rconn)
-            && ofconn->role != NX_ROLE_SLAVE) {
+            && ofconn_receives_async_msgs(ofconn)) {
             if (prev) {
                 queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
             } else {
@@ -4008,10 +4049,14 @@ do_send_packet_in(struct ofpbuf *packet, void *ofconn_)
  * finalizes its content for sending on 'ofconn', and passes it to 'ofconn''s
  * packet scheduler for sending.
  *
+ * 'max_len' specifies the maximum number of bytes of the packet to send on
+ * 'ofconn' (INT_MAX specifies no limit).
+ *
  * If 'clone' is true, the caller retains ownership of 'packet'.  Otherwise,
  * ownership is transferred to this function. */
 static void
-schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, bool clone)
+schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, int max_len,
+                   bool clone)
 {
     struct ofproto *ofproto = ofconn->ofproto;
     struct ofp_packet_in *opi = packet->data;
@@ -4024,6 +4069,8 @@ schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, bool clone)
         buffer_id = UINT32_MAX;
     } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) {
         buffer_id = pktbuf_get_null();
+    } else if (!ofconn->pktbuf) {
+        buffer_id = UINT32_MAX;
     } else {
         struct ofpbuf payload;
         payload.data = opi->data;
@@ -4036,6 +4083,7 @@ schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, bool clone)
     if (buffer_id != UINT32_MAX) {
         send_len = MIN(send_len, ofconn->miss_send_len);
     }
+    send_len = MIN(send_len, max_len);
 
     /* Adjust packet length and clone if necessary. */
     trim_size = offsetof(struct ofp_packet_in, data) + send_len;
@@ -4064,8 +4112,11 @@
  * The conversion is not complete: the caller still needs to trim any unneeded
  * payload off the end of the buffer, set the length in the OpenFlow header,
  * and set buffer_id.  Those require us to know the controller settings and so
- * must be done on a per-controller basis. */
-static void
+ * must be done on a per-controller basis.
+ *
+ * Returns the maximum number of bytes of the packet that should be sent to
+ * the controller (INT_MAX if no limit). */
+static int
 do_convert_to_packet_in(struct ofpbuf *packet)
 {
     struct odp_msg *msg = packet->data;
@@ -4073,9 +4124,16 @@ do_convert_to_packet_in(struct ofpbuf *packet)
     uint8_t reason;
     uint16_t total_len;
     uint16_t in_port;
+    int max_len;
 
     /* Extract relevant header fields */
-    reason = (msg->type == _ODPL_ACTION_NR ? OFPR_ACTION : OFPR_NO_MATCH);
+    if (msg->type == _ODPL_ACTION_NR) {
+        reason = OFPR_ACTION;
+        max_len = msg->arg;
+    } else {
+        reason = OFPR_NO_MATCH;
+        max_len = INT_MAX;
+    }
     total_len = msg->length - sizeof *msg;
     in_port = odp_port_to_ofp_port(msg->port);
 
@@ -4087,6 +4145,8 @@ do_convert_to_packet_in(struct ofpbuf *packet)
     opi->total_len = htons(total_len);
     opi->in_port = htons(in_port);
     opi->reason = reason;
+
+    return max_len;
 }
 
 /* Given 'packet' containing an odp_msg of type _ODPL_ACTION_NR or
@@ -4101,20 +4161,21 @@ static void
 send_packet_in(struct ofproto *ofproto, struct ofpbuf *packet)
 {
     struct ofconn *ofconn, *prev;
+    int max_len;
 
-    do_convert_to_packet_in(packet);
+    max_len = do_convert_to_packet_in(packet);
 
     prev = NULL;
     LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
-        if (ofconn->role != NX_ROLE_SLAVE) {
+        if (ofconn_receives_async_msgs(ofconn)) {
             if (prev) {
-                schedule_packet_in(prev, packet, true);
+                schedule_packet_in(prev, packet, max_len, true);
             }
             prev = ofconn;
         }
     }
     if (prev) {
-        schedule_packet_in(prev, packet, false);
+        schedule_packet_in(prev, packet, max_len, false);
     } else {
         ofpbuf_delete(packet);
     }
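
The heart of this patch is the interplay between the two new helpers: snoop_preference() ranks controller connections so that add_snooper() attaches a new snoop connection to the most interesting controller, and ofconn_receives_async_msgs() centralizes the rule for which connections receive port-status, flow-removed, and packet-in messages. The stand-alone sketch below condenses that logic for readers who want to experiment with it. It is illustrative only: struct conn, the ROLE_*/CONN_* enums, and the sample connections are simplified stand-ins, not the real Open vSwitch definitions.

/* Stand-alone sketch of the selection logic this patch adds.  The types
 * below are simplified stand-ins for the real Open vSwitch structures. */
#include <stdbool.h>
#include <stdio.h>

enum role { ROLE_OTHER, ROLE_MASTER, ROLE_SLAVE };  /* cf. NX_ROLE_*. */
enum conn_type { CONN_CONTROLLER, CONN_TRANSIENT }; /* cf. OFCONN_*. */

struct conn {
    const char *name;
    enum conn_type type;
    enum role role;
    int miss_send_len;
};

/* Mirrors snoop_preference(): masters are the most interesting connections
 * to snoop, slaves the least. */
static int
snoop_preference(const struct conn *c)
{
    switch (c->role) {
    case ROLE_MASTER: return 3;
    case ROLE_OTHER: return 2;
    case ROLE_SLAVE: return 1;
    default: return 0;          /* Shouldn't happen. */
    }
}

/* Mirrors ofconn_receives_async_msgs(): ordinary controllers receive
 * asynchronous messages unless they made themselves slaves; transient
 * connections must opt in with a nonzero miss send length. */
static bool
receives_async_msgs(const struct conn *c)
{
    return c->type == CONN_CONTROLLER
           ? c->role != ROLE_SLAVE
           : c->miss_send_len > 0;
}

int
main(void)
{
    struct conn conns[] = {
        { "master", CONN_CONTROLLER, ROLE_MASTER, 128 },
        { "slave", CONN_CONTROLLER, ROLE_SLAVE, 128 },
        { "monitor", CONN_TRANSIENT, ROLE_OTHER, 0 },
    };
    const struct conn *best = NULL;
    size_t i;

    for (i = 0; i < sizeof conns / sizeof *conns; i++) {
        const struct conn *c = &conns[i];

        printf("%s: receives async msgs? %s\n",
               c->name, receives_async_msgs(c) ? "yes" : "no");

        /* Same scan that add_snooper() now performs over all_conns. */
        if (c->type == CONN_CONTROLLER
            && (!best || snoop_preference(c) > snoop_preference(best))) {
            best = c;
        }
    }
    printf("snoop target: %s\n", best ? best->name : "(none)");
    return 0;
}

Compiled and run, the sketch reports that the master receives asynchronous messages and wins the snoop selection, while the slave and the transient monitor connection (miss send length of zero) receive none, matching the behavior the patch gives send_packet_in() and add_snooper().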