1 /* Copyright (c) 2008 The Board of Trustees of The Leland Stanford
4 * We are making the OpenFlow specification and associated documentation
5 * (Software) available for public use and benefit with the expectation
6 * that others will use, modify and enhance the Software and contribute
7 * those enhancements back to the community. However, since we would
8 * like to make the Software available for broadest use, with as few
9 * restrictions as possible permission is hereby granted, free of
10 * charge, to any person obtaining a copy of this Software to deal in
11 * the Software under the copyrights without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
17 * The above copyright notice and this permission notice shall be
18 * included in all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * The name and trademarks of copyright holder(s) may NOT be used in
30 * advertising or publicity pertaining to the Software or any
31 * derivatives without specific, written prior permission.
35 #include <arpa/inet.h>
49 #include "poll-loop.h"
52 #include "switch-flow.h"
58 #define THIS_MODULE VLM_datapath
64 extern char serial_num;
66 /* Capabilities supported by this implementation. */
67 #define OFP_SUPPORTED_CAPABILITIES ( OFPC_FLOW_STATS \
72 /* Actions supported by this implementation. */
73 #define OFP_SUPPORTED_ACTIONS ( (1 << OFPAT_OUTPUT) \
74 | (1 << OFPAT_SET_DL_VLAN) \
75 | (1 << OFPAT_SET_DL_SRC) \
76 | (1 << OFPAT_SET_DL_DST) \
77 | (1 << OFPAT_SET_NW_SRC) \
78 | (1 << OFPAT_SET_NW_DST) \
79 | (1 << OFPAT_SET_TP_SRC) \
80 | (1 << OFPAT_SET_TP_DST) )
83 uint32_t config; /* Some subset of OFPPC_* flags. */
84 uint32_t state; /* Some subset of OFPPS_* flags. */
86 struct netdev *netdev;
87 struct list node; /* Element in datapath.ports. */
88 unsigned long long int rx_packets, tx_packets;
89 unsigned long long int rx_bytes, tx_bytes;
90 unsigned long long int tx_dropped;
93 /* The origin of a received OpenFlow message, to enable sending a reply. */
95 struct remote *remote; /* The device that sent the message. */
96 uint32_t xid; /* The OpenFlow transaction ID. */
99 /* A connection to a controller or a management device. */
103 #define TXQ_LIMIT 128 /* Max number of packets to queue for tx. */
104 int n_txq; /* Number of packets queued for tx on rconn. */
106 /* Support for reliable, multi-message replies to requests.
108 * If an incoming request needs to have a reliable reply that might
109 * require multiple messages, it can use remote_start_dump() to set up
110 * a callback that will be called as buffer space for replies. */
111 int (*cb_dump)(struct datapath *, void *aux);
112 void (*cb_done)(void *aux);
117 /* Remote connections. */
118 struct remote *controller; /* Connection to controller. */
119 struct list remotes; /* All connections (including controller). */
120 struct pvconn *listen_pvconn;
124 /* Unique identifier for this datapath */
127 struct sw_chain *chain; /* Forwarding rules. */
129 /* Configuration set from controller. */
131 uint16_t miss_send_len;
134 struct sw_port ports[OFPP_MAX];
135 struct list port_list; /* List of ports, for flooding. */
138 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 60);
140 static struct remote *remote_create(struct datapath *, struct rconn *);
141 static void remote_run(struct datapath *, struct remote *);
142 static void remote_wait(struct remote *);
143 static void remote_destroy(struct remote *);
145 void dp_output_port(struct datapath *, struct ofpbuf *,
146 int in_port, int out_port, bool ignore_no_fwd);
147 void dp_update_port_flags(struct datapath *dp, const struct ofp_port_mod *opm);
148 void dp_output_control(struct datapath *, struct ofpbuf *, int in_port,
149 size_t max_len, int reason);
150 static void send_flow_expired(struct datapath *, struct sw_flow *,
151 enum ofp_flow_expired_reason);
152 static int update_port_status(struct sw_port *p);
153 static void send_port_status(struct sw_port *p, uint8_t status);
154 static void del_switch_port(struct sw_port *p);
155 static void execute_actions(struct datapath *, struct ofpbuf *,
156 int in_port, const struct sw_flow_key *,
157 const struct ofp_action *, int n_actions,
159 static void modify_vlan(struct ofpbuf *buffer, const struct sw_flow_key *key,
160 const struct ofp_action *a);
161 static void modify_nh(struct ofpbuf *buffer, uint16_t eth_proto,
162 uint8_t nw_proto, const struct ofp_action *a);
163 static void modify_th(struct ofpbuf *buffer, uint16_t eth_proto,
164 uint8_t nw_proto, const struct ofp_action *a);
166 /* Buffers are identified to userspace by a 31-bit opaque ID. We divide the ID
167 * into a buffer number (low bits) and a cookie (high bits). The buffer number
168 * is an index into an array of buffers. The cookie distinguishes between
169 * different packets that have occupied a single buffer. Thus, the more
170 * buffers we have, the lower-quality the cookie... */
171 #define PKT_BUFFER_BITS 8
172 #define N_PKT_BUFFERS (1 << PKT_BUFFER_BITS)
173 #define PKT_BUFFER_MASK (N_PKT_BUFFERS - 1)
175 #define PKT_COOKIE_BITS (32 - PKT_BUFFER_BITS)
177 int run_flow_through_tables(struct datapath *, struct ofpbuf *,
179 void fwd_port_input(struct datapath *, struct ofpbuf *, struct sw_port *);
180 int fwd_control_input(struct datapath *, const struct sender *,
181 const void *, size_t);
183 uint32_t save_buffer(struct ofpbuf *);
184 static struct ofpbuf *retrieve_buffer(uint32_t id);
185 static void discard_buffer(uint32_t id);
/* Returns the OpenFlow port number of 'p', i.e. its index within
 * dp->ports[].  Asserts that 'p' actually points into that array. */
187 static int port_no(struct datapath *dp, struct sw_port *p)
189     assert(p >= dp->ports && p < &dp->ports[ARRAY_SIZE(dp->ports)]);
190     return p - dp->ports;
193 /* Generates and returns a random datapath id. */
195 gen_datapath_id(void)
197     uint8_t ea[ETH_ADDR_LEN];
    /* NOTE(review): the elided line(s) between the declaration and the
     * return presumably fill 'ea' with a random Ethernet address before
     * converting it -- confirm against the full source. */
199     return eth_addr_to_uint64(ea);
/* Allocates and initializes a new datapath, storing it in '*dp_'.
 * 'rconn' becomes the primary controller connection.  If 'dpid' fits in
 * 48 bits it is used as the datapath id, otherwise a random id is
 * generated.  Logs an error if the flow chain cannot be created. */
203 dp_new(struct datapath **dp_, uint64_t dpid, struct rconn *rconn)
207     dp = calloc(1, sizeof *dp);
    /* NOTE(review): the calloc failure check is among the elided lines --
     * do not assume 'dp' is non-NULL here without confirming. */
212     dp->last_timeout = time_now();
213     list_init(&dp->remotes);
214     dp->controller = remote_create(dp, rconn);
215     dp->listen_pvconn = NULL;
    /* A datapath id is only 48 bits wide; larger values are replaced. */
216     dp->id = dpid <= UINT64_C(0xffffffffffff) ? dpid : gen_datapath_id();
217     dp->chain = chain_create();
219         VLOG_ERR("could not create chain");
224     list_init(&dp->port_list);
226     dp->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
/* Opens network device 'name', puts it in promiscuous mode, and attaches
 * it to 'dp' as a switch port, notifying the controller via a port-status
 * message.  Warns about devices that still carry IPv4/IPv6 addresses,
 * since traffic on a switch port is not intended for the local host. */
232 dp_add_port(struct datapath *dp, const char *name)
234     struct netdev *netdev;
240     error = netdev_open(name, NETDEV_ETH_TYPE_ANY, &netdev);
244     error = netdev_set_flags(netdev, NETDEV_UP | NETDEV_PROMISC, false);
246         VLOG_ERR("couldn't set promiscuous mode on %s device", name);
247         netdev_close(netdev);
250     if (netdev_get_in4(netdev, &in4)) {
251         VLOG_ERR("%s device has assigned IP address %s", name, inet_ntoa(in4));
253     if (netdev_get_in6(netdev, &in6)) {
254         char in6_name[INET6_ADDRSTRLEN + 1];
255         inet_ntop(AF_INET6, &in6, in6_name, sizeof in6_name);
256         VLOG_ERR("%s device has assigned IPv6 address %s", name, in6_name);
    /* Find the first free slot (netdev == NULL) in dp->ports[]. */
259     for (p = dp->ports; ; p++) {
260         if (p >= &dp->ports[ARRAY_SIZE(dp->ports)]) {
262         } else if (!p->netdev) {
267     memset(p, '\0', sizeof *p);
271     list_push_back(&dp->port_list, &p->node);
273     /* Notify the ctlpath that this port has been added */
274     send_port_status(p, OFPPR_ADD);
/* Registers 'listen_pvconn' as the passive connection on which 'dp'
 * accepts new management/controller connections.  May be set only once. */
280 dp_add_listen_pvconn(struct datapath *dp, struct pvconn *listen_pvconn)
282     assert(!dp->listen_pvconn);
283     dp->listen_pvconn = listen_pvconn;
/* Performs one iteration of datapath processing: once per second it
 * refreshes port status and expires flows; then it receives and forwards
 * packets from every port, services each remote connection, and accepts
 * any new passive connections on dp->listen_pvconn. */
287 dp_run(struct datapath *dp)
289     time_t now = time_now();
290     struct sw_port *p, *pn;
291     struct remote *r, *rn;
292     struct ofpbuf *buffer = NULL;
    /* Periodic (at most once-per-second) maintenance. */
294     if (now != dp->last_timeout) {
295         struct list deleted = LIST_INITIALIZER(&deleted);
296         struct sw_flow *f, *n;
298         LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
299             if (update_port_status(p)) {
300                 send_port_status(p, OFPPR_MODIFY);
    /* Expire old flows and tell the controller about each one. */
304         chain_timeout(dp->chain, &deleted);
305         LIST_FOR_EACH_SAFE (f, n, struct sw_flow, node, &deleted) {
306             send_flow_expired(dp, f, f->reason);
307             list_remove(&f->node);
310         dp->last_timeout = now;
312     poll_timer_wait(1000);
314     LIST_FOR_EACH_SAFE (p, pn, struct sw_port, node, &dp->port_list) {
318         /* Allocate buffer with some headroom to add headers in forwarding
319          * to the controller or adding a vlan tag, plus an extra 2 bytes to
320          * allow IP headers to be aligned on a 4-byte boundary. */
321         const int headroom = 128 + 2;
322         const int hard_header = VLAN_ETH_HEADER_LEN;
323         const int mtu = netdev_get_mtu(p->netdev);
324         buffer = ofpbuf_new(headroom + hard_header + mtu);
325         buffer->data = (char*)buffer->data + headroom;
327         error = netdev_recv(p->netdev, buffer);
    /* Successful receive: account for it and run it through the tables.
     * fwd_port_input() takes ownership of 'buffer'. */
330             p->rx_bytes += buffer->size;
331             fwd_port_input(dp, buffer, p);
333         } else if (error != EAGAIN) {
334             VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
335                         netdev_get_name(p->netdev), strerror(error));
338     ofpbuf_delete(buffer);
340     /* Talk to remotes. */
341     LIST_FOR_EACH_SAFE (r, rn, struct remote, node, &dp->remotes) {
344     if (dp->listen_pvconn) {
346         struct vconn *new_vconn;
349         retval = pvconn_accept(dp->listen_pvconn, OFP_VERSION, &new_vconn);
    /* EAGAIN simply means no new connection is pending. */
351             if (retval != EAGAIN) {
352                 VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
356             remote_create(dp, rconn_new_from_vconn("passive", new_vconn));
/* Processes traffic for remote connection 'r': dispatches up to a bounded
 * number of received OpenFlow messages to fwd_control_input(), continues
 * any in-progress multi-message dump when tx queue space allows, and
 * destroys the remote once its connection dies. */
362 remote_run(struct datapath *dp, struct remote *r)
368     /* Do some remote processing, but cap it at a reasonable amount so that
369      * other processing doesn't starve. */
370     for (i = 0; i < 50; i++) {
372         struct ofpbuf *buffer;
373         struct ofp_header *oh;
375         buffer = rconn_recv(r->rconn);
    /* Only dispatch messages long enough to hold an OpenFlow header. */
380         if (buffer->size >= sizeof *oh) {
381             struct sender sender;
385             sender.xid = oh->xid;
386             fwd_control_input(dp, &sender, buffer->data, buffer->size);
388             VLOG_WARN_RL(&rl, "received too-short OpenFlow message");
390         ofpbuf_delete(buffer);
    /* Continue a pending dump only while the tx queue has room; the
     * callback returns >0 to be called again, 0 when done, <0 on error. */
392         if (r->n_txq < TXQ_LIMIT) {
393             int error = r->cb_dump(dp, r->cb_aux);
396                 VLOG_WARN_RL(&rl, "dump callback error: %s",
399                 r->cb_done(r->cb_aux);
408     if (!rconn_is_alive(r->rconn)) {
/* Arranges for the poll loop to wake up when 'r' needs attention. */
414 remote_wait(struct remote *r)
416     rconn_run_wait(r->rconn);
417     rconn_recv_wait(r->rconn);
/* Tears down remote connection 'r': finishes any in-progress dump via its
 * 'done' callback, unlinks it from the datapath's remotes list, and
 * destroys the underlying rconn. */
421 remote_destroy(struct remote *r)
424     if (r->cb_dump && r->cb_done) {
425         r->cb_done(r->cb_aux);
427     list_remove(&r->node);
428     rconn_destroy(r->rconn);
/* Creates a new remote wrapping 'rconn' and links it into dp->remotes.
 * The new remote starts with no dump callback in progress. */
433 static struct remote *
434 remote_create(struct datapath *dp, struct rconn *rconn)
436     struct remote *remote = xmalloc(sizeof *remote);
437     list_push_back(&dp->remotes, &remote->node);
438     remote->rconn = rconn;
439     remote->cb_dump = NULL;
444 /* Starts a callback-based, reliable, possibly multi-message reply to a
445  * request made by 'remote'.
447  * 'dump' designates a function that will be called when the 'remote' send
448  * queue has an empty slot.  It should compose a message and send it on
449  * 'remote'.  On success, it should return 1 if it should be called again when
450  * another send queue slot opens up, 0 if its transmissions are complete, or a
451  * negative errno value on failure.
453  * 'done' designates a function to clean up any resources allocated for the
454  * dump.  It must handle being called before the dump is complete (which will
455  * happen if 'remote' is closed unexpectedly).
457  * 'aux' is passed to 'dump' and 'done'. */
459 remote_start_dump(struct remote *remote,
460                   int (*dump)(struct datapath *, void *),
461                   void (*done)(void *),
    /* Only one dump may be in progress per remote at a time. */
464     assert(!remote->cb_dump);
465     remote->cb_dump = dump;
466     remote->cb_done = done;
467     remote->cb_aux = aux;
/* Arranges for the poll loop to wake up when any of dp's ports receives a
 * packet, any remote needs service, or a new passive connection arrives. */
471 dp_wait(struct datapath *dp)
476     LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
477         netdev_recv_wait(p->netdev);
479     LIST_FOR_EACH (r, struct remote, node, &dp->remotes) {
482     if (dp->listen_pvconn) {
483         pvconn_wait(dp->listen_pvconn);
487 /* Delete 'p' from switch. */
489 del_switch_port(struct sw_port *p)
    /* Tell the controller first, then release the device and unlink. */
491     send_port_status(p, OFPPR_DELETE);
492     netdev_close(p->netdev);
494     list_remove(&p->node);
/* Destroys datapath 'dp', deleting every switch port and its flow chain. */
498 dp_destroy(struct datapath *dp)
500     struct sw_port *p, *n;
506     LIST_FOR_EACH_SAFE (p, n, struct sw_port, node, &dp->port_list) {
509     chain_destroy(dp->chain);
513 /* Send packets out all the ports except the originating one.  If the
514  * "flood" argument is set, don't send out ports with flooding disabled.
517 output_all(struct datapath *dp, struct ofpbuf *buffer, int in_port, int flood)
    /* 'prev_port' lags one port behind the scan so that the final
     * transmission can consume 'buffer' itself instead of a clone. */
523     LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
524         if (port_no(dp, p) == in_port) {
527         if (flood && p->config & OFPPC_NO_FLOOD) {
530         if (prev_port != -1) {
531             dp_output_port(dp, ofpbuf_clone(buffer), in_port, prev_port,
534         prev_port = port_no(dp, p);
537         dp_output_port(dp, buffer, in_port, prev_port, false);
    /* No eligible port was found: free the unsent buffer. */
539         ofpbuf_delete(buffer);
/* Transmits 'buffer' on physical port 'out_port' of 'dp', updating the
 * port's tx counters on success.  Takes ownership of 'buffer'; frees it
 * (with a rate-limited debug message) if the port is invalid or down. */
545 output_packet(struct datapath *dp, struct ofpbuf *buffer, int out_port)
547     if (out_port >= 0 && out_port < OFPP_MAX) {
548         struct sw_port *p = &dp->ports[out_port];
549         if (p->netdev != NULL && !(p->config & OFPPC_PORT_DOWN)) {
550             if (!netdev_send(p->netdev, buffer)) {
552                 p->tx_bytes += buffer->size;
560     ofpbuf_delete(buffer);
561     VLOG_DBG_RL(&rl, "can't forward to bad port %d\n", out_port);
564 /* Takes ownership of 'buffer' and transmits it to 'out_port' on 'dp'.
567 dp_output_port(struct datapath *dp, struct ofpbuf *buffer,
568                int in_port, int out_port, bool ignore_no_fwd)
    /* Dispatch on the virtual OpenFlow port numbers first; anything else
     * falls through to a plain physical-port transmission. */
572     if (out_port == OFPP_FLOOD) {
573         output_all(dp, buffer, in_port, 1);
574     } else if (out_port == OFPP_ALL) {
575         output_all(dp, buffer, in_port, 0);
576     } else if (out_port == OFPP_CONTROLLER) {
577         dp_output_control(dp, buffer, in_port, 0, OFPR_ACTION);
578     } else if (out_port == OFPP_IN_PORT) {
579         output_packet(dp, buffer, in_port);
580     } else if (out_port == OFPP_TABLE) {
581         struct sw_port *p = in_port < OFPP_MAX ? &dp->ports[in_port] : 0;
    /* run_flow_through_tables() returns nonzero when no flow matched, in
     * which case 'buffer' still belongs to us and must be freed. */
582         if (run_flow_through_tables(dp, buffer, p)) {
583             ofpbuf_delete(buffer);
    /* Refuse to hairpin a packet straight back out its input port. */
586         if (in_port == out_port) {
587             VLOG_DBG_RL(&rl, "can't directly forward to input port");
590         output_packet(dp, buffer, out_port);
/* Allocates an OpenFlow message of 'openflow_len' bytes and type 'type',
 * echoing 'sender''s transaction id (or 0 when 'sender' is null) so the
 * message pairs up as a reply.  Stores the buffer in '*bufferp'. */
595 make_openflow_reply(size_t openflow_len, uint8_t type,
596                     const struct sender *sender, struct ofpbuf **bufferp)
598     return make_openflow_xid(openflow_len, type, sender ? sender->xid : 0,
/* Takes ownership of 'buffer', patches up its OpenFlow length header, and
 * queues it for transmission to 'sender''s remote (or to the default
 * controller connection when 'sender' is null), subject to TXQ_LIMIT. */
603 send_openflow_buffer(struct datapath *dp, struct ofpbuf *buffer,
604                      const struct sender *sender)
606     struct remote *remote = sender ? sender->remote : dp->controller;
607     struct rconn *rconn = remote->rconn;
610     update_openflow_length(buffer);
611     retval = rconn_send_with_limit(rconn, buffer, &remote->n_txq, TXQ_LIMIT);
613         VLOG_WARN_RL(&rl, "send to %s failed: %s",
614                      rconn_get_name(rconn), strerror(retval));
619 /* Takes ownership of 'buffer' and transmits it to 'dp''s controller.  If the
620  * packet can be saved in a buffer, then only the first max_len bytes of
621  * 'buffer' are sent; otherwise, all of 'buffer' is sent.  'reason' indicates
622  * why 'buffer' is being sent.  'max_len' sets the maximum number of bytes that
623  * the caller wants to be sent; a value of 0 indicates the entire packet should
626 dp_output_control(struct datapath *dp, struct ofpbuf *buffer, int in_port,
627                   size_t max_len, int reason)
629     struct ofp_packet_in *opi;
633     buffer_id = save_buffer(buffer);
634     total_len = buffer->size;
    /* Truncate only when the packet was successfully buffered, so the
     * controller can retrieve the rest via 'buffer_id'. */
635     if (buffer_id != UINT32_MAX && max_len && buffer->size > max_len) {
636         buffer->size = max_len;
    /* Prepend the ofp_packet_in header in front of the packet data. */
639     opi = ofpbuf_push_uninit(buffer, offsetof(struct ofp_packet_in, data));
640     opi->header.version = OFP_VERSION;
641     opi->header.type = OFPT_PACKET_IN;
642     opi->header.length = htons(buffer->size);
643     opi->header.xid = htonl(0);
644     opi->buffer_id = htonl(buffer_id);
645     opi->total_len = htons(total_len);
646     opi->in_port = htons(in_port);
647     opi->reason = reason;
649     send_openflow_buffer(dp, buffer, NULL);
/* Fills in 'desc' with a network-byte-order description of port 'p' of
 * 'dp': port number, device name, hardware address, config/state flags,
 * and the four netdev feature bitmaps. */
652 static void fill_port_desc(struct datapath *dp, struct sw_port *p,
653                            struct ofp_phy_port *desc)
655     desc->port_no = htons(port_no(dp, p));
    /* strncpy may leave the name unterminated; terminate it explicitly. */
656     strncpy((char *) desc->name, netdev_get_name(p->netdev),
658     desc->name[sizeof desc->name - 1] = '\0';
659     memcpy(desc->hw_addr, netdev_get_etheraddr(p->netdev), ETH_ADDR_LEN);
660     desc->config = htonl(p->config);
661     desc->state = htonl(p->state);
662     desc->curr = htonl(netdev_get_features(p->netdev, NETDEV_FEAT_CURRENT));
663     desc->supported = htonl(netdev_get_features(p->netdev,
664                                                 NETDEV_FEAT_SUPPORTED));
665     desc->advertised = htonl(netdev_get_features(p->netdev,
666                                                  NETDEV_FEAT_ADVERTISED));
667     desc->peer = htonl(netdev_get_features(p->netdev, NETDEV_FEAT_PEER));
/* Composes and sends an OFPT_FEATURES_REPLY to 'sender' describing 'dp':
 * datapath id, table/buffer counts, supported capabilities and actions,
 * plus one ofp_phy_port entry per attached port. */
671 dp_send_features_reply(struct datapath *dp, const struct sender *sender)
673     struct ofpbuf *buffer;
674     struct ofp_switch_features *ofr;
677     ofr = make_openflow_reply(sizeof *ofr, OFPT_FEATURES_REPLY,
679     ofr->datapath_id  = htonll(dp->id);
680     ofr->n_tables     = dp->chain->n_tables;
681     ofr->n_buffers    = htonl(N_PKT_BUFFERS);
682     ofr->capabilities = htonl(OFP_SUPPORTED_CAPABILITIES);
683     ofr->actions      = htonl(OFP_SUPPORTED_ACTIONS);
    /* Append a port description for every port on the flood list. */
684     LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
685         struct ofp_phy_port *opp = ofpbuf_put_uninit(buffer, sizeof *opp);
686         memset(opp, 0, sizeof *opp);
687         fill_port_desc(dp, p, opp);
689     send_openflow_buffer(dp, buffer, sender);
/* Applies an OFPT_PORT_MOD request 'opm' to the matching port of 'dp',
 * updating the masked config bits and raising or lowering the underlying
 * netdev when OFPPC_PORT_DOWN changes.  Ignores the request if the port's
 * hardware address no longer matches the one in the message. */
693 dp_update_port_flags(struct datapath *dp, const struct ofp_port_mod *opm)
695     int port_no = ntohs(opm->port_no);
696     if (port_no < OFPP_MAX) {
697         struct sw_port *p = &dp->ports[port_no];
699         /* Make sure the port id hasn't changed since this was sent */
700         if (!p || memcmp(opm->hw_addr, netdev_get_etheraddr(p->netdev),
701                          ETH_ADDR_LEN) != 0) {
    /* Replace only the config bits selected by the mask. */
707             uint32_t config_mask = ntohl(opm->mask);
708             p->config &= ~config_mask;
709             p->config |= ntohl(opm->config) & config_mask;
    /* Mirror an OFPPC_PORT_DOWN transition onto the actual device. */
712         if (opm->mask & htonl(OFPPC_PORT_DOWN)) {
713             if ((opm->config & htonl(OFPPC_PORT_DOWN))
714                 && (p->config & OFPPC_PORT_DOWN) == 0) {
715                 p->config |= OFPPC_PORT_DOWN;
716                 netdev_turn_flags_off(p->netdev, NETDEV_UP, true);
717             } else if ((opm->config & htonl(OFPPC_PORT_DOWN)) == 0
718                        && (p->config & OFPPC_PORT_DOWN)) {
719                 p->config &= ~OFPPC_PORT_DOWN;
720                 netdev_turn_flags_on(p->netdev, NETDEV_UP, true);
726 /* Update the port status field of the bridge port.  A non-zero return
727  * value indicates some field has changed.
729  * NB: Callers of this function may hold the RCU read lock, so any
730  * additional checks must not sleep.
733 update_port_status(struct sw_port *p)
736     enum netdev_flags flags;
    /* Remember the starting values so we can report whether anything
     * actually changed. */
737     uint32_t orig_config = p->config;
738     uint32_t orig_state = p->state;
740     if (netdev_get_flags(p->netdev, &flags) < 0) {
741         VLOG_WARN_RL(&rl, "could not get netdev flags for %s",
742                      netdev_get_name(p->netdev));
745     if (flags & NETDEV_UP) {
746         p->config &= ~OFPPC_PORT_DOWN;
748         p->config |= OFPPC_PORT_DOWN;
752     /* Not all cards support this getting link status, so don't warn on
754     retval = netdev_get_link_status(p->netdev);
756         p->state &= ~OFPPS_LINK_DOWN;
757     } else if (retval == 0) {
758         p->state |= OFPPS_LINK_DOWN;
761     return ((orig_config != p->config) || (orig_state != p->state));
/* Sends an OFPT_PORT_STATUS message for port 'p' with reason 'status'
 * (OFPPR_ADD, OFPPR_DELETE, or OFPPR_MODIFY) to the controller. */
765 send_port_status(struct sw_port *p, uint8_t status)
767     struct ofpbuf *buffer;
768     struct ofp_port_status *ops;
769     ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &buffer);
770     ops->reason = status;
771     memset(ops->pad, 0, sizeof ops->pad);
772     fill_port_desc(p->dp, p, &ops->desc);
774     send_openflow_buffer(p->dp, buffer, NULL);
/* Notifies the controller with an OFPT_FLOW_EXPIRED message that 'flow'
 * has expired for 'reason', including its match, priority, lifetime, and
 * packet/byte counters. */
778 send_flow_expired(struct datapath *dp, struct sw_flow *flow,
779                   enum ofp_flow_expired_reason reason)
781     struct ofpbuf *buffer;
782     struct ofp_flow_expired *ofe;
783     ofe = make_openflow_xid(sizeof *ofe, OFPT_FLOW_EXPIRED, 0, &buffer);
784     flow_fill_match(&ofe->match, &flow->key);
786     ofe->priority = htons(flow->priority);
787     ofe->reason = reason;
788     memset(ofe->pad, 0, sizeof ofe->pad);
    /* Duration is reported in seconds since the flow was installed. */
790     ofe->duration     = htonl(time_now() - flow->created);
791     memset(ofe->pad2, 0, sizeof ofe->pad2);
792     ofe->packet_count = htonll(flow->packet_count);
793     ofe->byte_count   = htonll(flow->byte_count);
794     send_openflow_buffer(dp, buffer, NULL);
/* Sends an OFPT_ERROR reply to 'sender' with the given 'type' and 'code',
 * appending 'len' bytes of 'data' (typically the offending request) for
 * the controller's benefit. */
798 dp_send_error_msg(struct datapath *dp, const struct sender *sender,
799                   uint16_t type, uint16_t code, const void *data, size_t len)
801     struct ofpbuf *buffer;
802     struct ofp_error_msg *oem;
803     oem = make_openflow_reply(sizeof(*oem)+len, OFPT_ERROR, sender, &buffer);
804     oem->type = htons(type);
805     oem->code = htons(code);
806     memcpy(oem->data, data, len);
807     send_openflow_buffer(dp, buffer, sender);
/* Appends an ofp_flow_stats record for 'flow' (residing in table
 * 'table_idx') to 'buffer', including the flow's match fields, timers,
 * counters, and its full action list.  'now' is used to compute the
 * flow's duration. */
811 fill_flow_stats(struct ofpbuf *buffer, struct sw_flow *flow,
812                 int table_idx, time_t now)
814     struct ofp_flow_stats *ofs;
    /* The record is variable-length: header plus one entry per action. */
815     int length = sizeof *ofs + sizeof *ofs->actions * flow->sf_acts->n_actions;
816     ofs = ofpbuf_put_uninit(buffer, length);
817     ofs->length          = htons(length);
818     ofs->table_id        = table_idx;
    /* Match fields other than 'wildcards' are already in network byte
     * order inside the flow key, so they are copied verbatim. */
820     ofs->match.wildcards = htonl(flow->key.wildcards);
821     ofs->match.in_port   = flow->key.flow.in_port;
822     memcpy(ofs->match.dl_src, flow->key.flow.dl_src, ETH_ADDR_LEN);
823     memcpy(ofs->match.dl_dst, flow->key.flow.dl_dst, ETH_ADDR_LEN);
824     ofs->match.dl_vlan   = flow->key.flow.dl_vlan;
825     ofs->match.dl_type   = flow->key.flow.dl_type;
826     ofs->match.nw_src    = flow->key.flow.nw_src;
827     ofs->match.nw_dst    = flow->key.flow.nw_dst;
828     ofs->match.nw_proto  = flow->key.flow.nw_proto;
830     ofs->match.tp_src    = flow->key.flow.tp_src;
831     ofs->match.tp_dst    = flow->key.flow.tp_dst;
832     ofs->duration        = htonl(now - flow->created);
833     ofs->priority        = htons(flow->priority);
834     ofs->idle_timeout    = htons(flow->idle_timeout);
835     ofs->hard_timeout    = htons(flow->hard_timeout);
836     memset(ofs->pad2, 0, sizeof ofs->pad2);
837     ofs->packet_count    = htonll(flow->packet_count);
838     ofs->byte_count      = htonll(flow->byte_count);
839     memcpy(ofs->actions, flow->sf_acts->actions,
840            sizeof *ofs->actions * flow->sf_acts->n_actions);
844 /* 'buffer' was received on 'p', which may be a physical switch port or a
845  * null pointer.  Process it according to 'dp''s flow table.  Returns 0 if
846  * successful, in which case 'buffer' is destroyed, or -ESRCH if there is no
847  * matching flow, in which case 'buffer' still belongs to the caller. */
848 int run_flow_through_tables(struct datapath *dp, struct ofpbuf *buffer,
851     struct sw_flow_key key;
852     struct sw_flow *flow;
    /* flow_extract() returns nonzero for IP fragments; drop them when the
     * switch is configured with OFPC_FRAG_DROP. */
855     if (flow_extract(buffer, p ? port_no(dp, p) : OFPP_NONE, &key.flow)
856         && (dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
858         ofpbuf_delete(buffer);
    /* Honor per-port receive filtering: OFPPC_NO_RECV for ordinary
     * traffic, OFPPC_NO_RECV_STP for frames addressed to the STP
     * multicast address. */
861     if (p && p->config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP)
862         && p->config & (!eth_addr_equals(key.flow.dl_dst, stp_eth_addr)
863                         ? OFPPC_NO_RECV : OFPPC_NO_RECV_STP)) {
864         ofpbuf_delete(buffer);
868     flow = chain_lookup(dp->chain, &key);
870         flow_used(flow, buffer);
871         execute_actions(dp, buffer, port_no(dp, p),
872                         &key, flow->sf_acts->actions,
873                         flow->sf_acts->n_actions, false);
880 /* 'buffer' was received on 'p', which may be a physical switch port or a
881  * null pointer.  Process it according to 'dp''s flow table, sending it up to
882  * the controller if no flow matches.  Takes ownership of 'buffer'. */
883 void fwd_port_input(struct datapath *dp, struct ofpbuf *buffer,
886     if (run_flow_through_tables(dp, buffer, p)) {
    /* No match: forward up to miss_send_len bytes to the controller. */
887         dp_output_control(dp, buffer, port_no(dp, p),
888                           dp->miss_send_len, OFPR_NO_MATCH);
/* Takes ownership of 'buffer' and sends it either out 'out_port' or, for
 * OFPP_CONTROLLER, up to the controller truncated to 'max_len' bytes. */
893 do_output(struct datapath *dp, struct ofpbuf *buffer, int in_port,
894           size_t max_len, int out_port, bool ignore_no_fwd)
896     if (out_port != OFPP_CONTROLLER) {
897         dp_output_port(dp, buffer, in_port, out_port, ignore_no_fwd);
899         dp_output_control(dp, buffer, in_port, max_len, OFPR_ACTION);
/* Applies the 'n_actions' actions in 'actions' to 'buffer' (whose flow is
 * summarized by 'key'), which arrived on 'in_port'.  Takes ownership of
 * 'buffer'.  Field-modification actions edit the packet in place; output
 * actions are deferred one step so only intermediate outputs need clones. */
904 execute_actions(struct datapath *dp, struct ofpbuf *buffer,
905                 int in_port, const struct sw_flow_key *key,
906                 const struct ofp_action *actions, int n_actions,
909     /* Every output action needs a separate clone of 'buffer', but the common
910      * case is just a single output action, so that doing a clone and then
911      * freeing the original buffer is wasteful.  So the following code is
912      * slightly obscure just to avoid that. */
914     size_t max_len=0;        /* Initialze to make compiler happy */
919     eth_proto = ntohs(key->flow.dl_type);
921     for (i = 0; i < n_actions; i++) {
922         const struct ofp_action *a = &actions[i];
923         struct eth_header *eh = buffer->l2;
    /* Flush the previously recorded output before mutating the packet
     * further; it gets a clone since 'buffer' is still needed. */
925         if (prev_port != -1) {
926             do_output(dp, ofpbuf_clone(buffer), in_port, max_len, prev_port,
931         switch (ntohs(a->type)) {
933             prev_port = ntohs(a->arg.output.port);
934             max_len = ntohs(a->arg.output.max_len);
937         case OFPAT_SET_DL_VLAN:
938             modify_vlan(buffer, key, a);
941         case OFPAT_SET_DL_SRC:
942             memcpy(eh->eth_src, a->arg.dl_addr, sizeof eh->eth_src);
945         case OFPAT_SET_DL_DST:
946             memcpy(eh->eth_dst, a->arg.dl_addr, sizeof eh->eth_dst);
    /* NW and TP rewrites share helpers that also fix up checksums. */
949         case OFPAT_SET_NW_SRC:
950         case OFPAT_SET_NW_DST:
951             modify_nh(buffer, eth_proto, key->flow.nw_proto, a);
954         case OFPAT_SET_TP_SRC:
955         case OFPAT_SET_TP_DST:
956             modify_th(buffer, eth_proto, key->flow.nw_proto, a);
    /* Final pending output consumes 'buffer' itself (no clone). */
964         do_output(dp, buffer, in_port, max_len, prev_port, ignore_no_fwd);
966         ofpbuf_delete(buffer);
/* Implements OFPAT_SET_NW_SRC/DST on an IPv4 packet in 'buffer': rewrites
 * the selected address and incrementally updates the IP checksum plus the
 * TCP or UDP checksum (which covers the pseudo-header). */
969 static void modify_nh(struct ofpbuf *buffer, uint16_t eth_proto,
970                       uint8_t nw_proto, const struct ofp_action *a)
972     if (eth_proto == ETH_TYPE_IP) {
973         struct ip_header *nh = buffer->l3;
974         uint32_t new, *field;
976         new = a->arg.nw_addr;
977         field = a->type == OFPAT_SET_NW_SRC ? &nh->ip_src : &nh->ip_dst;
978         if (nw_proto == IP_TYPE_TCP) {
979             struct tcp_header *th = buffer->l4;
980             th->tcp_csum = recalc_csum32(th->tcp_csum, *field, new);
981         } else if (nw_proto == IP_TYPE_UDP) {
982             struct udp_header *th = buffer->l4;
984                 th->udp_csum = recalc_csum32(th->udp_csum, *field, new);
    /* A UDP checksum of zero means "none"; RFC 768 maps a computed
     * zero to 0xffff instead. */
986                     th->udp_csum = 0xffff;
990         nh->ip_csum = recalc_csum32(nh->ip_csum, *field, new);
/* Implements OFPAT_SET_TP_SRC/DST on an IPv4 packet in 'buffer': rewrites
 * the selected TCP or UDP port and incrementally updates the transport
 * checksum.  Non-IP packets are left untouched. */
995 static void modify_th(struct ofpbuf *buffer, uint16_t eth_proto,
996                       uint8_t nw_proto, const struct ofp_action *a)
998     if (eth_proto == ETH_TYPE_IP) {
999         uint16_t new, *field;
1003         if (nw_proto == IP_TYPE_TCP) {
1004             struct tcp_header *th = buffer->l4;
1005             field = a->type == OFPAT_SET_TP_SRC ? &th->tcp_src : &th->tcp_dst;
1006             th->tcp_csum = recalc_csum16(th->tcp_csum, *field, new);
1008         } else if (nw_proto == IP_TYPE_UDP) {
1009             struct udp_header *th = buffer->l4;
1010             field = a->type == OFPAT_SET_TP_SRC ? &th->udp_src : &th->udp_dst;
1011             th->udp_csum = recalc_csum16(th->udp_csum, *field, new);
/* Implements OFPAT_SET_DL_VLAN on 'buffer'.  With a real VLAN id, either
 * rewrites the VID of an existing 802.1Q tag (preserving PCP/CFI) or
 * inserts a new tag; with OFP_VLAN_NONE, strips any existing tag.
 * 'a->arg.vlan_id' is expected in network byte order here. */
1018 modify_vlan(struct ofpbuf *buffer,
1019             const struct sw_flow_key *key, const struct ofp_action *a)
1021     uint16_t new_id = a->arg.vlan_id;
1022     struct vlan_eth_header *veh;
1024     if (new_id != htons(OFP_VLAN_NONE)) {
1025         if (key->flow.dl_vlan != htons(OFP_VLAN_NONE)) {
1026             /* Modify vlan id, but maintain other TCI values */
1028             veh->veth_tci &= ~htons(VLAN_VID);
1029             veh->veth_tci |= new_id;
1031             /* Insert new vlan id. */
1032             struct eth_header *eh = buffer->l2;
1033             struct vlan_eth_header tmp;
    /* Build the tagged header in 'tmp' first, then grow the buffer
     * headroom and copy it into place. */
1034             memcpy(tmp.veth_dst, eh->eth_dst, ETH_ADDR_LEN);
1035             memcpy(tmp.veth_src, eh->eth_src, ETH_ADDR_LEN);
1036             tmp.veth_type = htons(ETH_TYPE_VLAN);
1037             tmp.veth_tci = new_id;
1038             tmp.veth_next_type = eh->eth_type;
1040             veh = ofpbuf_push_uninit(buffer, VLAN_HEADER_LEN);
1041             memcpy(veh, &tmp, sizeof tmp);
1042             buffer->l2 = (char*)buffer->l2 - VLAN_HEADER_LEN;
1045         /* Remove an existing vlan header if it exists */
1047         if (veh->veth_type == htons(ETH_TYPE_VLAN)) {
1048             struct eth_header tmp;
    /* Rebuild an untagged Ethernet header and shift the frame start
     * forward over the removed 4-byte tag. */
1050             memcpy(tmp.eth_dst, veh->veth_dst, ETH_ADDR_LEN);
1051             memcpy(tmp.eth_src, veh->veth_src, ETH_ADDR_LEN);
1052             tmp.eth_type = veh->veth_next_type;
1054             buffer->size -= VLAN_HEADER_LEN;
1055             buffer->data = (char*)buffer->data + VLAN_HEADER_LEN;
1056             buffer->l2 = (char*)buffer->l2 + VLAN_HEADER_LEN;
1057             memcpy(buffer->data, &tmp, sizeof tmp);
/* Handler for OFPT_FEATURES_REQUEST: replies with a features message. */
1063 recv_features_request(struct datapath *dp, const struct sender *sender,
1066     dp_send_features_reply(dp, sender);
/* Handler for OFPT_GET_CONFIG_REQUEST: replies with the switch's current
 * flags and miss_send_len. */
1071 recv_get_config_request(struct datapath *dp, const struct sender *sender,
1074     struct ofpbuf *buffer;
1075     struct ofp_switch_config *osc;
1077     osc = make_openflow_reply(sizeof *osc, OFPT_GET_CONFIG_REPLY,
1080     osc->flags = htons(dp->flags);
1081     osc->miss_send_len = htons(dp->miss_send_len);
1083     return send_openflow_buffer(dp, buffer, sender);
/* Handler for OFPT_SET_CONFIG: stores the controller-supplied flags and
 * miss_send_len, keeping only the bits this switch supports and forcing
 * unsupported fragment-handling modes to OFPC_FRAG_DROP. */
1087 recv_set_config(struct datapath *dp, const struct sender *sender UNUSED,
1090     const struct ofp_switch_config *osc = msg;
1093     flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
1094     if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
1095         && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
1096         flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;
1099     dp->miss_send_len = ntohs(osc->miss_send_len);
/* Handler for OFPT_PACKET_OUT: validates the action-list length, obtains
 * the packet either from the message payload (buffer_id == -1) or from
 * the switch's packet buffers, and runs the supplied actions on it. */
1104 recv_packet_out(struct datapath *dp, const struct sender *sender UNUSED,
1107     const struct ofp_packet_out *opo = msg;
1108     struct sw_flow_key key;
1109     struct ofpbuf *buffer;
1110     int n_actions = ntohs(opo->n_actions);
1111     int act_len = n_actions * sizeof opo->actions[0];
    /* Reject messages whose declared action count overruns the message. */
1113     if (act_len > (ntohs(opo->header.length) - sizeof *opo)) {
1114         VLOG_DBG_RL(&rl, "message too short for number of actions");
1118     if (ntohl(opo->buffer_id) == (uint32_t) -1) {
1119         /* FIXME: can we avoid copying data here? */
1120         int data_len = ntohs(opo->header.length) - sizeof *opo - act_len;
1121         buffer = ofpbuf_new(data_len);
    /* The packet data follows the action array in the message. */
1122         ofpbuf_put(buffer, &opo->actions[n_actions], data_len);
1124         buffer = retrieve_buffer(ntohl(opo->buffer_id));
1130     flow_extract(buffer, ntohs(opo->in_port), &key.flow);
1131     execute_actions(dp, buffer, ntohs(opo->in_port),
1132                     &key, opo->actions, n_actions, true);
/* Handler for OFPT_PORT_MOD: forwards the request to
 * dp_update_port_flags(). */
1138 recv_port_mod(struct datapath *dp, const struct sender *sender UNUSED,
1141     const struct ofp_port_mod *opm = msg;
1143     dp_update_port_flags(dp, opm);
/* Implements OFPFC_ADD: validates the action list, allocates and fills a
 * new flow from 'ofm', inserts it into the flow chain, and, if the
 * message names a buffered packet, immediately runs that packet through
 * the new actions.  Returns 0 on success or a negative errno value. */
1149 add_flow(struct datapath *dp, const struct ofp_flow_mod *ofm)
1151     int error = -ENOMEM;
1154     struct sw_flow *flow;
1157     /* To prevent loops, make sure there's no action to send to the
1158      * OFP_TABLE virtual port.
    /* The action array occupies whatever space follows the fixed header. */
1160     n_actions = (ntohs(ofm->header.length) - sizeof *ofm)
1161         / sizeof *ofm->actions;
1162     for (i=0; i<n_actions; i++) {
1163         const struct ofp_action *a = &ofm->actions[i];
1165         if (a->type == htons(OFPAT_OUTPUT)
1166             && (a->arg.output.port == htons(OFPP_TABLE)
1167                 || a->arg.output.port == htons(OFPP_NONE)
1168                 || a->arg.output.port == ofm->match.in_port)) {
1169             /* xxx Send fancy new error message? */
1174     /* Allocate memory. */
1175     flow = flow_alloc(n_actions);
1179     /* Fill out flow. */
1180     flow_extract_match(&flow->key, &ofm->match);
    /* Exact-match (no wildcards) flows get the highest priority (-1
     * as unsigned), so they always beat wildcarded entries. */
1181     flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
1182     flow->idle_timeout = ntohs(ofm->idle_timeout);
1183     flow->hard_timeout = ntohs(ofm->hard_timeout);
1184     flow->used = flow->created = time_now();
1185     flow->sf_acts->n_actions = n_actions;
1186     flow->byte_count = 0;
1187     flow->packet_count = 0;
1188     memcpy(flow->sf_acts->actions, ofm->actions,
1189            n_actions * sizeof *flow->sf_acts->actions);
1192     error = chain_insert(dp->chain, flow);
1194         goto error_free_flow;
    /* Apply the new flow to the buffered packet, if one was named. */
1197     if (ntohl(ofm->buffer_id) != UINT32_MAX) {
1198         struct ofpbuf *buffer = retrieve_buffer(ntohl(ofm->buffer_id));
1200             struct sw_flow_key key;
1201             uint16_t in_port = ntohs(ofm->match.in_port);
1202             flow_used(flow, buffer);
1203             flow_extract(buffer, in_port, &key.flow);
1204             execute_actions(dp, buffer, in_port, &key,
1205                             ofm->actions, n_actions, false);
    /* Error path: make sure any referenced buffered packet is released. */
1215     if (ntohl(ofm->buffer_id) != (uint32_t) -1)
1216         discard_buffer(ntohl(ofm->buffer_id));
/* Implements OFPFC_MODIFY and OFPFC_MODIFY_STRICT: validates the action
 * list, updates matching flows in the chain with the new actions, and,
 * if the message names a buffered packet, runs that packet through the
 * new actions.  Returns 0 on success or a negative errno value. */
1221 mod_flow(struct datapath *dp, const struct ofp_flow_mod *ofm)
1223     int error = -ENOMEM;
1226     struct sw_flow_key key;
1231     /* To prevent loops, make sure there's no action to send to the
1232      * OFP_TABLE virtual port.
1234     n_actions = (ntohs(ofm->header.length) - sizeof *ofm)
1235         / sizeof *ofm->actions;
1236     for (i=0; i<n_actions; i++) {
1237         const struct ofp_action *a = &ofm->actions[i];
1239         if (a->type == htons(OFPAT_OUTPUT)
1240             && (a->arg.output.port == htons(OFPP_TABLE)
1241                 || a->arg.output.port == htons(OFPP_NONE)
1242                 || a->arg.output.port == ofm->match.in_port)) {
1243             /* xxx Send fancy new error message? */
1248     flow_extract_match(&key, &ofm->match);
    /* Exact-match keys get the top priority, as in add_flow(). */
1249     priority = key.wildcards ? ntohs(ofm->priority) : -1;
1250     strict = (ofm->command == htons(OFPFC_MODIFY_STRICT)) ? 1 : 0;
1251     chain_modify(dp->chain, &key, priority, strict, ofm->actions, n_actions);
1253     if (ntohl(ofm->buffer_id) != UINT32_MAX) {
1254         struct ofpbuf *buffer = retrieve_buffer(ntohl(ofm->buffer_id));
1256             struct sw_flow_key skb_key;
1257             uint16_t in_port = ntohs(ofm->match.in_port);
1258             flow_extract(buffer, in_port, &skb_key.flow);
1259             execute_actions(dp, buffer, in_port, &skb_key,
1260                             ofm->actions, n_actions, false);
    /* Error path: make sure any referenced buffered packet is released. */
1268     if (ntohl(ofm->buffer_id) != (uint32_t) -1)
1269         discard_buffer(ntohl(ofm->buffer_id));
/* Dispatches an OFPT_FLOW_MOD message to the handler for its command
 * (add / modify / delete).  Returns 0 on success, negative errno on
 * failure (-ESRCH when a delete matched nothing). */
recv_flow(struct datapath *dp, const struct sender *sender UNUSED,
    const struct ofp_flow_mod *ofm = msg;
    uint16_t command = ntohs(ofm->command);
    if (command == OFPFC_ADD) {
        return add_flow(dp, ofm);
    } else if ((command == OFPFC_MODIFY) || (command == OFPFC_MODIFY_STRICT)) {
        return mod_flow(dp, ofm);
    } else if (command == OFPFC_DELETE) {
        /* Non-strict delete: priority 0 and strict=0 delete every flow
         * the (possibly wildcarded) match covers. */
        struct sw_flow_key key;
        flow_extract_match(&key, &ofm->match);
        return chain_delete(dp->chain, &key, 0, 0) ? 0 : -ESRCH;
    } else if (command == OFPFC_DELETE_STRICT) {
        struct sw_flow_key key;
        /* Strict delete must also match priority; exact-match flows use
         * the implicit top priority (-1), mirroring add_flow/mod_flow. */
        flow_extract_match(&key, &ofm->match);
        priority = key.wildcards ? ntohs(ofm->priority) : -1;
        return chain_delete(dp->chain, &key, priority, 1) ? 0 : -ESRCH;
/* OFPST_DESC handler: appends an ofp_desc_stats record filled with the
 * build-time description strings. */
static int desc_stats_dump(struct datapath *dp, void *state,
                           struct ofpbuf *buffer)
    struct ofp_desc_stats *ods = ofpbuf_put_uninit(buffer, sizeof *ods);
    /* mfr_desc/hw_desc/sw_desc/serial_num are declared as plain 'extern
     * char' symbols (see top of file), hence the '&' — presumably
     * linker/build-provided string blobs; TODO confirm their definitions.
     * NOTE(review): strncpy does not NUL-terminate when the source fills
     * the field exactly; the wire format treats these as fixed-width. */
    strncpy(ods->mfr_desc, &mfr_desc, sizeof ods->mfr_desc);
    strncpy(ods->hw_desc, &hw_desc, sizeof ods->hw_desc);
    strncpy(ods->sw_desc, &sw_desc, sizeof ods->sw_desc);
    strncpy(ods->serial_num, &serial_num, sizeof ods->serial_num);
/* Iteration state carried across flow_stats_dump() calls (OFPST_FLOW
 * replies can span multiple buffers). */
struct flow_stats_state {
    struct sw_table_position position;  /* Resume point within a table. */
    struct ofp_flow_stats_request rq;   /* Copy of the original request. */
    struct ofpbuf *buffer;              /* Reply buffer being filled. */
/* Soft cap on reply payload; once reached the dump pauses and resumes
 * in a later OFPSF_REPLY_MORE segment. */
#define MAX_FLOW_STATS_BYTES 4096
/* OFPST_FLOW init: snapshot the request and pick the starting table. */
static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
    const struct ofp_flow_stats_request *fsr = body;
    struct flow_stats_state *s = xmalloc(sizeof *s);
    /* table_id 0xff means "all tables": start the walk at table 0. */
    s->table_idx = fsr->table_id == 0xff ? 0 : fsr->table_id;
    memset(&s->position, 0, sizeof s->position);
/* Per-flow callback for the OFPST_FLOW table walk: appends one stats
 * record.  Returns nonzero (stop iterating) once the reply is full. */
static int flow_stats_dump_callback(struct sw_flow *flow, void *private)
    struct flow_stats_state *s = private;
    fill_flow_stats(s->buffer, flow, s->table_idx, s->now);
    return s->buffer->size >= MAX_FLOW_STATS_BYTES;
/* OFPST_FLOW dump: walks tables from s->table_idx, appending matching
 * flows.  Nonzero return means "more data, call me again". */
static int flow_stats_dump(struct datapath *dp, void *state,
                           struct ofpbuf *buffer)
    struct flow_stats_state *s = state;
    struct sw_flow_key match_key;
    flow_extract_match(&match_key, &s->rq.match);
    s->now = time_now();
    /* 0xff selects every table; otherwise stop after the single table. */
    while (s->table_idx < dp->chain->n_tables
           && (s->rq.table_id == 0xff || s->rq.table_id == s->table_idx))
        struct sw_table *table = dp->chain->tables[s->table_idx];
        /* Nonzero from iterate() means the buffer filled mid-table;
         * s->position remembers where to resume on the next call. */
        if (table->iterate(table, &match_key, &s->position,
                           flow_stats_dump_callback, s))
        /* Finished this table: reset position before moving on. */
        memset(&s->position, 0, sizeof s->position);
    return s->buffer->size >= MAX_FLOW_STATS_BYTES;
/* OFPST_FLOW cleanup: releases the state from flow_stats_init()
 * (body elided from this view). */
static void flow_stats_done(void *state)
/* State for OFPST_AGGREGATE: just a private copy of the request. */
struct aggregate_stats_state {
    struct ofp_aggregate_stats_request rq;
/* OFPST_AGGREGATE init: allocate state and (on elided lines) copy the
 * request into it. */
static int aggregate_stats_init(struct datapath *dp,
                                const void *body, int body_len,
    const struct ofp_aggregate_stats_request *rq = body;
    struct aggregate_stats_state *s = xmalloc(sizeof *s);
/* Accumulates one flow's counters into the reply.  Counters stay in
 * host byte order here; aggregate_stats_dump() swaps once at the end. */
static int aggregate_stats_dump_callback(struct sw_flow *flow, void *private)
    struct ofp_aggregate_stats_reply *rpy = private;
    rpy->packet_count += flow->packet_count;
    rpy->byte_count += flow->byte_count;
/* OFPST_AGGREGATE dump: sums packet/byte/flow counts over every
 * matching flow in the requested table(s), then converts the totals to
 * network byte order in place. */
static int aggregate_stats_dump(struct datapath *dp, void *state,
                                struct ofpbuf *buffer)
    struct aggregate_stats_state *s = state;
    struct ofp_aggregate_stats_request *rq = &s->rq;
    struct ofp_aggregate_stats_reply *rpy;
    struct sw_table_position position;
    struct sw_flow_key match_key;
    rpy = ofpbuf_put_uninit(buffer, sizeof *rpy);
    memset(rpy, 0, sizeof *rpy);  /* Zero the counters before summing. */
    flow_extract_match(&match_key, &rq->match);
    /* 0xff selects all tables; otherwise only the named one. */
    table_idx = rq->table_id == 0xff ? 0 : rq->table_id;
    memset(&position, 0, sizeof position);
    while (table_idx < dp->chain->n_tables
           && (rq->table_id == 0xff || rq->table_id == table_idx))
        struct sw_table *table = dp->chain->tables[table_idx];
        error = table->iterate(table, &match_key, &position,
                               aggregate_stats_dump_callback, rpy);
        /* Next table: restart position. */
        memset(&position, 0, sizeof position);
    /* Callback accumulated in host order; byte-swap exactly once here. */
    rpy->packet_count = htonll(rpy->packet_count);
    rpy->byte_count = htonll(rpy->byte_count);
    rpy->flow_count = htonl(rpy->flow_count);
/* OFPST_AGGREGATE cleanup: releases the state from
 * aggregate_stats_init() (body elided from this view). */
static void aggregate_stats_done(void *state)
/* OFPST_TABLE handler: appends one ofp_table_stats record per flow
 * table, converting counters to network byte order. */
static int table_stats_dump(struct datapath *dp, void *state,
                            struct ofpbuf *buffer)
    for (i = 0; i < dp->chain->n_tables; i++) {
        struct ofp_table_stats *ots = ofpbuf_put_uninit(buffer, sizeof *ots);
        struct sw_table_stats stats;
        dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
        /* NOTE(review): strncpy leaves ots->name unterminated if
         * stats.name fills the field; the wire format is fixed-width. */
        strncpy(ots->name, stats.name, sizeof ots->name);
        ots->wildcards = htonl(stats.wildcards);
        memset(ots->pad, 0, sizeof ots->pad);
        ots->max_entries = htonl(stats.max_flows);
        ots->active_count = htonl(stats.n_flows);
        ots->lookup_count = htonll(stats.n_lookup);
        ots->matched_count = htonll(stats.n_matched);
/* State for OFPST_PORT: tracks the next port index to report
 * (field declarations elided from this view). */
struct port_stats_state {
/* OFPST_PORT init: allocate iteration state (initialization of the
 * starting port is on elided lines). */
static int port_stats_init(struct datapath *dp, const void *body, int body_len,
    struct port_stats_state *s = xmalloc(sizeof *s);
/* OFPST_PORT dump: appends an ofp_port_stats record per port, resuming
 * from s->port so large replies can span multiple buffers. */
static int port_stats_dump(struct datapath *dp, void *state,
                           struct ofpbuf *buffer)
    struct port_stats_state *s = state;
    for (i = s->port; i < OFPP_MAX; i++) {
        struct sw_port *p = &dp->ports[i];
        struct ofp_port_stats *ops;
        ops = ofpbuf_put_uninit(buffer, sizeof *ops);
        ops->port_no = htons(port_no(dp, p));
        memset(ops->pad, 0, sizeof ops->pad);
        ops->rx_packets = htonll(p->rx_packets);
        ops->tx_packets = htonll(p->tx_packets);
        ops->rx_bytes = htonll(p->rx_bytes);
        ops->tx_bytes = htonll(p->tx_bytes);
        /* Counters this userspace switch does not track are reported as
         * all-ones (htonll(-1)), OpenFlow's "unsupported" marker. */
        ops->rx_dropped = htonll(-1);
        ops->tx_dropped = htonll(p->tx_dropped);
        ops->rx_errors = htonll(-1);
        ops->tx_errors = htonll(-1);
        ops->rx_frame_err = htonll(-1);
        ops->rx_over_err = htonll(-1);
        ops->rx_crc_err = htonll(-1);
        ops->collisions = htonll(-1);
/* OFPST_PORT cleanup: releases the state from port_stats_init()
 * (body elided from this view). */
static void port_stats_done(void *state)
    /* One entry of the statistics-dispatch table: describes how to
     * validate, initialize, dump, and clean up one OFPST_* request type. */
    /* Value for 'type' member of struct ofp_stats_request. */

    /* Minimum and maximum acceptable number of bytes in body member of
     * struct ofp_stats_request. */
    size_t min_body, max_body;

    /* Prepares to dump some kind of statistics on 'dp'. 'body' and
     * 'body_len' are the 'body' member of the struct ofp_stats_request.
     * Returns zero if successful, otherwise a negative error code.
     * May initialize '*state' to state information. May be null if no
     * initialization is required.*/
    int (*init)(struct datapath *dp, const void *body, int body_len,

    /* Appends statistics for 'dp' to 'buffer', which initially contains a
     * struct ofp_stats_reply. On success, it should return 1 if it should be
     * called again later with another buffer, 0 if it is done, or a negative
     * errno value on failure. */
    int (*dump)(struct datapath *dp, void *state, struct ofpbuf *buffer);

    /* Cleans any state created by the init or dump functions. May be null
     * if no cleanup is required. */
    void (*done)(void *state);
/* Table of supported statistics types; recv_stats_request() searches it
 * linearly by 'type'.  (Other entries are elided from this view.) */
static const struct stats_type stats[] = {
        sizeof(struct ofp_flow_stats_request),       /* min_body */
        sizeof(struct ofp_flow_stats_request),       /* max_body */
        sizeof(struct ofp_aggregate_stats_request),  /* min_body */
        sizeof(struct ofp_aggregate_stats_request),  /* max_body */
        aggregate_stats_init,
        aggregate_stats_dump,
        aggregate_stats_done
/* Callback context threaded through a (possibly multi-part) stats dump. */
struct stats_dump_cb {
    struct ofp_stats_request *rq;   /* Copy of the request (xmemdup'd). */
    struct sender sender;           /* Connection to send replies on. */
    const struct stats_type *s;     /* Handler for this request type. */
1587 stats_dump(struct datapath *dp, void *cb_)
1589 struct stats_dump_cb *cb = cb_;
1590 struct ofp_stats_reply *osr;
1591 struct ofpbuf *buffer;
1598 osr = make_openflow_reply(sizeof *osr, OFPT_STATS_REPLY, &cb->sender,
1600 osr->type = htons(cb->s->type);
1603 err = cb->s->dump(dp, cb->state, buffer);
1609 /* Buffer might have been reallocated, so find our data again. */
1610 osr = ofpbuf_at_assert(buffer, 0, sizeof *osr);
1611 osr->flags = ntohs(OFPSF_REPLY_MORE);
1613 err2 = send_openflow_buffer(dp, buffer, &cb->sender);
/* Tears down a stats dump: invokes the type-specific done() hook (the
 * guard and the frees of cb->rq/cb are on elided lines). */
stats_done(void *cb_)
    struct stats_dump_cb *cb = cb_;
        cb->s->done(cb->state);
/* Handles OFPT_STATS_REQUEST: finds the handler for the requested type,
 * validates the body length against the handler's declared bounds,
 * initializes per-type state, and starts a (possibly multi-part) dump
 * on the sender's connection. */
recv_stats_request(struct datapath *dp, const struct sender *sender,
    const struct ofp_stats_request *rq = oh;
    size_t rq_len = ntohs(rq->header.length);
    const struct stats_type *st;
    struct stats_dump_cb *cb;

    /* Linear search of the stats[] dispatch table for this type. */
    type = ntohs(rq->type);
    for (st = stats; ; st++) {
        if (st >= &stats[ARRAY_SIZE(stats)]) {
            VLOG_WARN_RL(&rl, "received stats request of unknown type %d",
        } else if (type == st->type) {

    cb = xmalloc(sizeof *cb);
    /* Private copy of the request; presumably freed in stats_done()
     * (frees are on elided lines — TODO confirm). */
    cb->rq = xmemdup(rq, rq_len);
    cb->sender = *sender;

    /* Reject bodies outside the handler's [min_body, max_body] range. */
    body_len = rq_len - offsetof(struct ofp_stats_request, body);
    if (body_len < cb->s->min_body || body_len > cb->s->max_body) {
        VLOG_WARN_RL(&rl, "stats request type %d with bad body length %d",

    err = cb->s->init(dp, rq->body, body_len, &cb->state);
        VLOG_WARN_RL(&rl,
                     "failed initialization of stats request type %d: %s",
                     type, strerror(-err));

    /* Hand off to the connection's dump machinery; stats_dump() will be
     * called repeatedly and stats_done() on completion. */
    remote_start_dump(sender->remote, stats_dump, stats_done, cb);
/* OFPT_ECHO_REQUEST: immediately reply with a matching echo reply. */
recv_echo_request(struct datapath *dp, const struct sender *sender,
    return send_openflow_buffer(dp, make_echo_reply(oh), sender);
/* OFPT_ECHO_REPLY: nothing to do — receipt alone proves liveness. */
recv_echo_reply(struct datapath *dp UNUSED, const struct sender *sender UNUSED,
                const void *oh UNUSED)
/* 'msg', which is 'length' bytes long, was received from the control path.
 * Apply it to 'chain'.  Validates the encapsulated length and version,
 * then dispatches on message type: each case records the handler and
 * the minimum size that handler requires, both checked after the
 * switch.  (break/return statements are elided from this view.) */
fwd_control_input(struct datapath *dp, const struct sender *sender,
                  const void *msg, size_t length)
    int (*handler)(struct datapath *, const struct sender *, const void *);
    struct ofp_header *oh;

    /* Check encapsulated length. */
    oh = (struct ofp_header *) msg;
    if (ntohs(oh->length) > length) {
    assert(oh->version == OFP_VERSION);

    /* Figure out how to handle it. */
    case OFPT_FEATURES_REQUEST:
        min_size = sizeof(struct ofp_header);
        handler = recv_features_request;
    case OFPT_GET_CONFIG_REQUEST:
        min_size = sizeof(struct ofp_header);
        handler = recv_get_config_request;
    case OFPT_SET_CONFIG:
        min_size = sizeof(struct ofp_switch_config);
        handler = recv_set_config;
    case OFPT_PACKET_OUT:
        min_size = sizeof(struct ofp_packet_out);
        handler = recv_packet_out;
        min_size = sizeof(struct ofp_flow_mod);
        handler = recv_flow;
        min_size = sizeof(struct ofp_port_mod);
        handler = recv_port_mod;
    case OFPT_STATS_REQUEST:
        min_size = sizeof(struct ofp_stats_request);
        handler = recv_stats_request;
    case OFPT_ECHO_REQUEST:
        min_size = sizeof(struct ofp_header);
        handler = recv_echo_request;
    case OFPT_ECHO_REPLY:
        min_size = sizeof(struct ofp_header);
        handler = recv_echo_reply;
        /* Unknown type: tell the controller via an error message. */
        dp_send_error_msg(dp, sender, OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE,

    /* Message shorter than its handler requires is rejected (the return
     * is on an elided line). */
    if (length < min_size)
    return handler(dp, sender, msg);
1771 /* Packet buffering. */
1773 #define OVERWRITE_SECS 1
/* One slot of the circular packet buffer used to hand packets to the
 * controller by buffer_id (cookie/timeout fields are elided here). */
struct packet_buffer {
    struct ofpbuf *buffer;  /* Stored packet. */

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;     /* Most recently assigned slot. */
/* Stores a clone of 'buffer' so the controller can refer to it later.
 * The returned id packs the slot index in the low PKT_BUFFER_BITS and a
 * per-slot cookie above it, so stale ids can be detected. */
uint32_t save_buffer(struct ofpbuf *buffer)
    struct packet_buffer *p;

    /* Round-robin to the next slot. */
    buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
    p = &buffers[buffer_idx];

    /* Don't buffer packet if existing entry is less than
     * OVERWRITE_SECS old. */
    if (time_now() < p->timeout) { /* FIXME */

        /* Evict the old occupant of this slot. */
        ofpbuf_delete(p->buffer);

    /* Don't use maximum cookie value since the all-bits-1 id is
    if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
    p->buffer = ofpbuf_clone(buffer); /* FIXME */
    p->timeout = time_now() + OVERWRITE_SECS; /* FIXME */
    id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
/* Looks up the buffered packet for 'id'; returns it when the slot's
 * cookie matches the id's high bits, otherwise NULL (stale/bogus id).
 * (The branch bodies are partly elided from this view — the printf
 * below belongs to the mismatch path.) */
static struct ofpbuf *retrieve_buffer(uint32_t id)
    struct ofpbuf *buffer = NULL;
    struct packet_buffer *p;

    p = &buffers[id & PKT_BUFFER_MASK];
    if (p->cookie == id >> PKT_BUFFER_BITS) {
        /* NOTE(review): a raw printf for the mismatch diagnostic is
         * inconsistent with the rate-limited VLOG_* used elsewhere in
         * this file — consider VLOG_WARN_RL. */
        printf("cookie mismatch: %x != %x\n",
               id >> PKT_BUFFER_BITS, p->cookie);
1828 static void discard_buffer(uint32_t id)
1830 struct packet_buffer *p;
1832 p = &buffers[id & PKT_BUFFER_MASK];
1833 if (p->cookie == id >> PKT_BUFFER_BITS) {
1834 ofpbuf_delete(p->buffer);