 * Copyright (c) 2008, 2009, 2010 Nicira Networks.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
#include "learning-switch.h"
#include <netinet/in.h>
#include "mac-learning.h"
#include "ofp-print.h"
#include "openflow/openflow.h"
#include "poll-loop.h"
#define THIS_MODULE VLM_learning_switch
    P_FORWARDING = 1 << 3,
    /* If nonnegative, the switch sets up flows that expire after the given
     * number of seconds (or never expire, if the value is OFP_FLOW_PERMANENT).
     * Otherwise, the switch processes every packet. */
    int max_idle;
    unsigned long long int datapath_id;
    uint32_t capabilities;
    time_t last_features_request;
    struct mac_learning *ml;    /* NULL to act as hub instead of switch. */
    bool exact_flows;           /* Use exact-match flows? */
    bool action_normal;         /* Use OFPP_NORMAL? */

    /* Number of outgoing queued packets on the rconn. */
    struct rconn_packet_counter *queued;

    /* Spanning tree protocol implementation.
     * We implement STP states by, whenever a port's STP state changes,
     * querying all the flows on the switch and then deleting any of them that
     * are inappropriate for a port's STP state. */
    long long int next_query;   /* Next time at which to query all flows. */
    long long int last_query;   /* Last time we sent a query. */
    long long int last_reply;   /* Last time we received a query reply. */
    unsigned int port_states[STP_MAX_PORTS];
    uint32_t query_xid;         /* XID used for query. */
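    /* Statistics for the most recent flow stats query: the number of flows
     * examined, and how many were deleted because their port may no longer
     * receive (n_no_recv) or send (n_no_send) under STP. */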
    int n_flows, n_no_recv, n_no_send;

/* The log messages here could actually be useful in debugging, so keep the
 * rate limit relatively high. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);

static void queue_tx(struct lswitch *, struct rconn *, struct ofpbuf *);
static void send_features_request(struct lswitch *, struct rconn *);
static void schedule_query(struct lswitch *, long long int delay);
static bool may_learn(const struct lswitch *, uint16_t port_no);
static bool may_recv(const struct lswitch *, uint16_t port_no,
                     bool any_actions);
static bool may_send(const struct lswitch *, uint16_t port_no);
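/* Handlers for the OpenFlow message types that the learning switch cares
 * about.  lswitch_process_packet() dispatches each received message to the
 * matching handler based on the type in its OpenFlow header. */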
typedef void packet_handler_func(struct lswitch *, struct rconn *, void *);
static packet_handler_func process_switch_features;
static packet_handler_func process_packet_in;
static packet_handler_func process_echo_request;
static packet_handler_func process_port_status;
static packet_handler_func process_phy_port;
static packet_handler_func process_stats_reply;
/* Creates and returns a new learning switch.
 *
 * If 'learn_macs' is true, the new switch will learn the ports on which MAC
 * addresses appear.  Otherwise, the new switch will flood all packets.
 *
 * If 'max_idle' is nonnegative, the new switch will set up flows that expire
 * after the given number of seconds (or never expire, if 'max_idle' is
 * OFP_FLOW_PERMANENT).  Otherwise, the new switch will process every packet.
 *
 * 'rconn' is used to send out an OpenFlow features request. */
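/* Example (hypothetical caller, not part of this file): a controller that
 * learns MAC addresses and installs exact-match flows that idle out after 60
 * seconds, without using OFPP_NORMAL, might call:
 *
 *     struct lswitch *sw = lswitch_create(rconn, true, true, 60, false);
 */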
lswitch_create(struct rconn *rconn, bool learn_macs,
               bool exact_flows, int max_idle, bool action_normal)
    sw = xcalloc(1, sizeof *sw);
    sw->max_idle = max_idle;
    sw->last_features_request = time_now() - 1;
    sw->ml = learn_macs ? mac_learning_create() : NULL;
    sw->action_normal = action_normal;
    sw->exact_flows = exact_flows;
    sw->queued = rconn_packet_counter_create();
    sw->next_query = LLONG_MIN;
    sw->last_query = LLONG_MIN;
    sw->last_reply = LLONG_MIN;
    for (i = 0; i < STP_MAX_PORTS; i++) {
        sw->port_states[i] = P_DISABLED;
    send_features_request(sw, rconn);
lswitch_destroy(struct lswitch *sw)
    mac_learning_destroy(sw->ml);
    rconn_packet_counter_destroy(sw->queued);
/* Takes care of necessary 'sw' activity, except for receiving packets (which
 * the caller must do). */
lswitch_run(struct lswitch *sw, struct rconn *rconn)
    long long int now = time_msec();
    mac_learning_run(sw->ml, NULL);
    /* If we're waiting for more replies, keep waiting for up to 10 s. */
    if (sw->last_reply != LLONG_MIN) {
        if (now - sw->last_reply > 10000) {
            VLOG_ERR_RL(&rl, "%012llx: No more flow stat replies in last 10 s",
            sw->last_reply = LLONG_MIN;
            sw->last_query = LLONG_MIN;
            schedule_query(sw, 0);
    /* If we're waiting for any reply at all, keep waiting for up to 10 s. */
    if (sw->last_query != LLONG_MIN) {
        if (now - sw->last_query > 10000) {
            VLOG_ERR_RL(&rl, "%012llx: No flow stat replies in last 10 s",
            sw->last_query = LLONG_MIN;
            schedule_query(sw, 0);
    /* If it's time to send another query, do so. */
    if (sw->next_query != LLONG_MIN && now >= sw->next_query) {
        sw->next_query = LLONG_MIN;
        if (!rconn_is_connected(rconn)) {
            schedule_query(sw, 1000);
            struct ofp_stats_request *osr;
            struct ofp_flow_stats_request *ofsr;
            VLOG_DBG("%012llx: Sending flow stats request to implement STP",
            sw->last_query = now;
            sw->query_xid = random_uint32();
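            /* Compose an OFPST_FLOW request that matches every flow in every
             * table.  The XID is remembered so that the reply can be matched
             * up in process_stats_reply(). */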
            osr = make_openflow_xid(sizeof *osr + sizeof *ofsr,
                                    OFPT_STATS_REQUEST, sw->query_xid, &b);
            osr->type = htons(OFPST_FLOW);
            osr->flags = htons(0);
            ofsr = (struct ofp_flow_stats_request *) osr->body;
            ofsr->match.wildcards = htonl(OFPFW_ALL);
            ofsr->table_id = 0xff;
            ofsr->out_port = htons(OFPP_NONE);
            error = rconn_send(rconn, b, NULL);
                VLOG_WARN_RL(&rl, "%012llx: sending flow stats request "
                             "failed: %s", sw->datapath_id, strerror(error));
                schedule_query(sw, 1000);
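/* Arranges for the poll loop to wake up when 10 seconds have passed since
 * 'started' (a time in milliseconds), or immediately if that deadline has
 * already gone by. */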
wait_timeout(long long int started)
    long long int now = time_msec();
    long long int timeout = 10000 - (now - started);
        poll_immediate_wake();
        poll_timer_wait(timeout);
lswitch_wait(struct lswitch *sw)
    mac_learning_wait(sw->ml);
    if (sw->last_reply != LLONG_MIN) {
        wait_timeout(sw->last_reply);
    } else if (sw->last_query != LLONG_MIN) {
        wait_timeout(sw->last_query);
/* Processes 'msg', which should be an OpenFlow message received on 'rconn',
 * according to the learning switch state in 'sw'.  The most likely result of
 * processing is that flow-setup and packet-out OpenFlow messages will be sent
 * out on 'rconn'. */
lswitch_process_packet(struct lswitch *sw, struct rconn *rconn,
                       const struct ofpbuf *msg)
        packet_handler_func *handler;
    static const struct processor processors[] = {
            sizeof(struct ofp_header),
            sizeof(struct ofp_switch_features),
            process_switch_features
            offsetof(struct ofp_packet_in, data),
            sizeof(struct ofp_port_status),
            offsetof(struct ofp_stats_reply, body),
            sizeof(struct ofp_flow_expired),
    const size_t n_processors = ARRAY_SIZE(processors);
    const struct processor *p;
    struct ofp_header *oh;
    if (sw->datapath_id == 0
        && oh->type != OFPT_ECHO_REQUEST
        && oh->type != OFPT_FEATURES_REPLY) {
        send_features_request(sw, rconn);
    for (p = processors; p < &processors[n_processors]; p++) {
        if (oh->type == p->type) {
            if (msg->size < p->min_size) {
                VLOG_WARN_RL(&rl, "%012llx: %s: too short (%zu bytes) for "
                             "type %"PRIu8" (min %zu)", sw->datapath_id,
                             rconn_get_name(rconn), msg->size, oh->type,
            (p->handler)(sw, rconn, msg->data);
    if (VLOG_IS_DBG_ENABLED()) {
        char *p = ofp_to_string(msg->data, msg->size, 2);
        VLOG_DBG_RL(&rl, "%012llx: OpenFlow packet ignored: %s",
send_features_request(struct lswitch *sw, struct rconn *rconn)
    time_t now = time_now();
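    /* Rate limit: send at most one features request per second. */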
    if (now >= sw->last_features_request + 1) {
        struct ofp_switch_config *osc;

        /* Send OFPT_FEATURES_REQUEST. */
        make_openflow(sizeof(struct ofp_header), OFPT_FEATURES_REQUEST, &b);
        queue_tx(sw, rconn, b);

        /* Send OFPT_SET_CONFIG. */
        osc = make_openflow(sizeof *osc, OFPT_SET_CONFIG, &b);
        osc->flags = htons(OFPC_SEND_FLOW_EXP);
        osc->miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);
        queue_tx(sw, rconn, b);

        sw->last_features_request = now;
queue_tx(struct lswitch *sw, struct rconn *rconn, struct ofpbuf *b)
    int retval = rconn_send_with_limit(rconn, b, sw->queued, 10);
    if (retval && retval != ENOTCONN) {
        if (retval == EAGAIN) {
            VLOG_INFO_RL(&rl, "%012llx: %s: tx queue overflow",
                         sw->datapath_id, rconn_get_name(rconn));
            VLOG_WARN_RL(&rl, "%012llx: %s: send: %s",
                         sw->datapath_id, rconn_get_name(rconn),
schedule_query(struct lswitch *sw, long long int delay)
    long long int now = time_msec();
    if (sw->next_query == LLONG_MIN || sw->next_query > now + delay) {
        sw->next_query = now + delay;
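/* Handles an OFPT_FEATURES_REPLY: records the datapath ID and capabilities,
 * processes each physical port description, and, if the switch supports STP,
 * schedules a flow stats query so that STP port states can be enforced. */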
process_switch_features(struct lswitch *sw, struct rconn *rconn, void *osf_)
    struct ofp_switch_features *osf = osf_;
    size_t n_ports = ((ntohs(osf->header.length)
                       - offsetof(struct ofp_switch_features, ports))
                      / sizeof *osf->ports);
    sw->datapath_id = ntohll(osf->datapath_id);
    sw->capabilities = ntohl(osf->capabilities);
    for (i = 0; i < n_ports; i++) {
        process_phy_port(sw, rconn, &osf->ports[i]);
    if (sw->capabilities & OFPC_STP) {
        schedule_query(sw, 1000);
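/* Handles an OFPT_PACKET_IN message: learns which port the packet's source
 * MAC is on, picks an output port for its destination MAC (defaulting to
 * flooding), and then either sets up a matching flow or sends the packet
 * along directly with a packet-out. */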
process_packet_in(struct lswitch *sw, struct rconn *rconn, void *opi_)
    struct ofp_packet_in *opi = opi_;
    uint16_t in_port = ntohs(opi->in_port);
    uint16_t out_port = OFPP_FLOOD;
    size_t pkt_ofs, pkt_len;

    /* Extract flow data from 'opi' into 'flow'. */
    pkt_ofs = offsetof(struct ofp_packet_in, data);
    pkt_len = ntohs(opi->header.length) - pkt_ofs;
    pkt.data = opi->data;
    flow_extract(&pkt, in_port, &flow);
    if (may_learn(sw, in_port) && sw->ml) {
        if (mac_learning_learn(sw->ml, flow.dl_src, 0, in_port)) {
            VLOG_DBG_RL(&rl, "%012llx: learned that "ETH_ADDR_FMT" is on "
                        "port %"PRIu16, sw->datapath_id,
                        ETH_ADDR_ARGS(flow.dl_src), in_port);
    if (eth_addr_is_reserved(flow.dl_src)) {
    if (!may_recv(sw, in_port, false)) {
        /* STP prevents receiving anything on this port. */
        int learned_port = mac_learning_lookup(sw->ml, flow.dl_dst, 0);
        if (learned_port >= 0 && may_send(sw, learned_port)) {
            out_port = learned_port;
    if (in_port == out_port) {
        /* Don't send out packets on their input ports. */
    } else if (sw->max_idle >= 0 && (!sw->ml || out_port != OFPP_FLOOD)) {
        struct ofpbuf *buffer;
        struct ofp_flow_mod *ofm;
        /* Check if we need to wildcard the flows. */
        if (!sw->exact_flows) {
            /* We cannot wildcard all fields.
             * We need in_port to detect moves.
             * We need both SA and DA to do learning. */
            wildcards = (OFPFW_DL_TYPE | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
                         | OFPFW_NW_PROTO | OFPFW_TP_SRC | OFPFW_TP_DST);
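            /* With these bits set, the IP and transport fields are wildcarded
             * while in_port and the Ethernet addresses, which the comment
             * above says we need, are still matched exactly. */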
        /* Check if we need to use "NORMAL" action. */
        if (sw->action_normal && out_port != OFPP_FLOOD) {
            out_port = OFPP_NORMAL;
        /* The output port is known, or we always flood everything, so add a
         * new flow. */
        buffer = make_add_simple_flow(&flow, ntohl(opi->buffer_id),
                                      out_port, sw->max_idle);
        ofm->match.wildcards = htonl(wildcards);
        queue_tx(sw, rconn, buffer);

        /* If the switch didn't buffer the packet, we need to send a copy. */
        if (ntohl(opi->buffer_id) == UINT32_MAX) {
                     make_unbuffered_packet_out(&pkt, in_port, out_port));
        /* Check if we need to use "NORMAL" action. */
        if (sw->action_normal && out_port != OFPP_FLOOD) {
            out_port = OFPP_NORMAL;
        /* We don't know that MAC, or we don't set up flows.  Send along the
         * packet without setting up a flow. */
        if (ntohl(opi->buffer_id) == UINT32_MAX) {
            b = make_unbuffered_packet_out(&pkt, in_port, out_port);
            b = make_buffered_packet_out(ntohl(opi->buffer_id),
        queue_tx(sw, rconn, b);
        if (sw->max_idle >= 0) {
            /* Set up a flow to drop packets. */
            queue_tx(sw, rconn, make_add_flow(&flow, ntohl(opi->buffer_id),
            /* Just drop the packet, since we don't set up flows at all.
             * XXX we should send a packet_out with no actions if buffer_id !=
             * UINT32_MAX, to avoid clogging the kernel buffers. */
process_echo_request(struct lswitch *sw, struct rconn *rconn, void *rq_)
    struct ofp_header *rq = rq_;
    queue_tx(sw, rconn, make_echo_reply(rq));
process_port_status(struct lswitch *sw, struct rconn *rconn, void *ops_)
    struct ofp_port_status *ops = ops_;
    process_phy_port(sw, rconn, &ops->desc);
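/* Updates the tracked STP state for the port described by 'opp_' (a struct
 * ofp_phy_port).  If the state changed, schedules a flow stats query so that
 * flows that are no longer appropriate for the port can be deleted. */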
process_phy_port(struct lswitch *sw, struct rconn *rconn OVS_UNUSED,
    const struct ofp_phy_port *opp = opp_;
    uint16_t port_no = ntohs(opp->port_no);
    if (sw->capabilities & OFPC_STP && port_no < STP_MAX_PORTS) {
        uint32_t config = ntohl(opp->config);
        uint32_t state = ntohl(opp->state);
        unsigned int *port_state = &sw->port_states[port_no];
        unsigned int new_port_state;

        if (!(config & (OFPPC_NO_STP | OFPPC_PORT_DOWN))
            && !(state & OFPPS_LINK_DOWN))
            switch (state & OFPPS_STP_MASK) {
            case OFPPS_STP_LISTEN:
                new_port_state = P_LISTENING;
            case OFPPS_STP_LEARN:
                new_port_state = P_LEARNING;
            case OFPPS_STP_FORWARD:
                new_port_state = P_FORWARDING;
            case OFPPS_STP_BLOCK:
                new_port_state = P_BLOCKING;
                new_port_state = P_DISABLED;
            new_port_state = P_FORWARDING;
        if (*port_state != new_port_state) {
            *port_state = new_port_state;
            schedule_query(sw, 1000);
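/* Returns the STP state tracked for 'port_no' as a P_* value.  Ports beyond
 * STP_MAX_PORTS, and all ports on a switch that does not advertise OFPC_STP,
 * are treated as forwarding. */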
get_port_state(const struct lswitch *sw, uint16_t port_no)
    return (port_no >= STP_MAX_PORTS || !(sw->capabilities & OFPC_STP)
            : sw->port_states[port_no]);
may_learn(const struct lswitch *sw, uint16_t port_no)
    return get_port_state(sw, port_no) & (P_LEARNING | P_FORWARDING);
may_recv(const struct lswitch *sw, uint16_t port_no, bool any_actions)
    unsigned int state = get_port_state(sw, port_no);
             ? state & (P_DISABLED | P_LISTENING | P_BLOCKING)
             : state & (P_DISABLED | P_LISTENING | P_BLOCKING | P_LEARNING));
may_send(const struct lswitch *sw, uint16_t port_no)
    return get_port_state(sw, port_no) & P_FORWARDING;
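/* Examines one flow stats entry, 'ofs', from the reply to our STP query, and
 * deletes the flow from the switch if it receives on, or forwards to, a port
 * whose STP state no longer allows it. */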
process_flow_stats(struct lswitch *sw, struct rconn *rconn,
                   const struct ofp_flow_stats *ofs)
    const char *end = (char *) ofs + ntohs(ofs->length);
    /* Decide to delete the flow if it matches on an STP-disabled physical
     * port.  But don't delete it if the flow just drops all received packets,
     * because that's a perfectly reasonable thing to do for disabled physical
     * ports. */
    if (!(ofs->match.wildcards & htonl(OFPFW_IN_PORT))) {
        if (!may_recv(sw, ntohs(ofs->match.in_port),
                      end > (char *) ofs->actions)) {
    /* Decide to delete the flow if it forwards to an STP-disabled physical
     * port. */
        const struct ofp_action_header *a;

        for (a = ofs->actions; (char *) a < end; a += len / 8) {
            if (len > end - (char *) a) {
                VLOG_DBG_RL(&rl, "%012llx: action exceeds available space "
                            sw->datapath_id, len, end - (char *) a);
            } else if (len % 8) {
                VLOG_DBG_RL(&rl, "%012llx: action length (%zu) not multiple "
                            "of 8 bytes", sw->datapath_id, len);
            if (a->type == htons(OFPAT_OUTPUT)) {
                struct ofp_action_output *oao = (struct ofp_action_output *) a;
                if (!may_send(sw, ntohs(oao->port))) {
    /* Delete the flow. */
        struct ofp_flow_mod *ofm;
        ofm = make_openflow(offsetof(struct ofp_flow_mod, actions),
        ofm->match = ofs->match;
        ofm->command = htons(OFPFC_DELETE_STRICT);
        rconn_send(rconn, b, NULL);
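/* Handles an OFPST_FLOW stats reply: ignores replies that do not correspond
 * to our outstanding STP query, runs process_flow_stats() on each flow in the
 * reply, and, once the final reply fragment has arrived, logs a summary and
 * clears the query state. */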
process_stats_reply(struct lswitch *sw, struct rconn *rconn, void *osr_)
    struct ofp_stats_reply *osr = osr_;
    struct flow_stats_iterator i;
    const struct ofp_flow_stats *fs;

    if (sw->last_query == LLONG_MIN
        || osr->type != htons(OFPST_FLOW)
        || osr->header.xid != sw->query_xid) {
    for (fs = flow_stats_first(&i, osr); fs; fs = flow_stats_next(&i)) {
        process_flow_stats(sw, rconn, fs);
    if (!(osr->flags & htons(OFPSF_REPLY_MORE))) {
        VLOG_DBG("%012llx: Deleted %d of %d received flows to "
                 "implement STP, %d because of no-recv, %d because of "
                 "no-send", sw->datapath_id,
                 sw->n_no_recv + sw->n_no_send, sw->n_flows,
                 sw->n_no_recv, sw->n_no_send);
        sw->last_query = LLONG_MIN;
        sw->last_reply = LLONG_MIN;
        sw->last_reply = time_msec();