1 /* Copyright (c) 2008 The Board of Trustees of The Leland Stanford
4 * We are making the OpenFlow specification and associated documentation
5 * (Software) available for public use and benefit with the expectation
6 * that others will use, modify and enhance the Software and contribute
7 * those enhancements back to the community. However, since we would
8 * like to make the Software available for broadest use, with as few
9 * restrictions as possible permission is hereby granted, free of
10 * charge, to any person obtaining a copy of this Software to deal in
11 * the Software under the copyrights without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
17 * The above copyright notice and this permission notice shall be
18 * included in all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * The name and trademarks of copyright holder(s) may NOT be used in
30 * advertising or publicity pertaining to the Software or any
31 * derivatives without specific, written prior permission.
35 #include <arpa/inet.h>
47 #include "poll-loop.h"
54 #define THIS_MODULE VLM_datapath
57 #define BRIDGE_PORT_NO_FLOOD 0x00000001
59 /* Capabilities supported by this implementation. */
60 #define OFP_SUPPORTED_CAPABILITIES ( OFPC_FLOW_STATS \
65 /* Actions supported by this implementation. */
66 #define OFP_SUPPORTED_ACTIONS ( (1 << OFPAT_OUTPUT) \
67 | (1 << OFPAT_SET_DL_VLAN) \
68 | (1 << OFPAT_SET_DL_SRC) \
69 | (1 << OFPAT_SET_DL_DST) \
70 | (1 << OFPAT_SET_NW_SRC) \
71 | (1 << OFPAT_SET_NW_DST) \
72 | (1 << OFPAT_SET_TP_SRC) \
73 | (1 << OFPAT_SET_TP_DST) )
78 struct netdev *netdev;
79 struct list node; /* Element in datapath.ports. */
80 unsigned long long int rx_count, tx_count, drop_count;
83 /* The origin of a received OpenFlow message, to enable sending a reply. */
85 struct remote *remote; /* The device that sent the message. */
86 uint32_t xid; /* The OpenFlow transaction ID. */
89 /* A connection to a controller or a management device. */
93 #define TXQ_LIMIT 128 /* Max number of packets to queue for tx. */
94 int n_txq; /* Number of packets queued for tx on rconn. */
96 /* Support for reliable, multi-message replies to requests.
98 * If an incoming request needs to have a reliable reply that might
99 * require multiple messages, it can use remote_start_dump() to set up
100 * a callback that will be called as buffer space for replies. */
101 int (*cb_dump)(struct datapath *, void *aux);
102 void (*cb_done)(void *aux);
107 /* Remote connections. */
108 struct remote *controller; /* Connection to controller. */
109 struct list remotes; /* All connections (including controller). */
110 struct vconn *listen_vconn;
114 /* Unique identifier for this datapath */
117 struct sw_chain *chain; /* Forwarding rules. */
119 /* Configuration set from controller. */
121 uint16_t miss_send_len;
124 struct sw_port ports[OFPP_MAX];
125 struct list port_list; /* List of ports, for flooding. */
128 static struct remote *remote_create(struct datapath *, struct rconn *);
129 static void remote_run(struct datapath *, struct remote *);
130 static void remote_wait(struct remote *);
131 static void remote_destroy(struct remote *);
133 void dp_output_port(struct datapath *, struct buffer *,
134 int in_port, int out_port);
135 void dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp);
136 void dp_output_control(struct datapath *, struct buffer *, int in_port,
137 size_t max_len, int reason);
138 static void send_flow_expired(struct datapath *, struct sw_flow *,
139 enum ofp_flow_expired_reason);
140 static void send_port_status(struct sw_port *p, uint8_t status);
141 static void del_switch_port(struct sw_port *p);
142 static void execute_actions(struct datapath *, struct buffer *,
143 int in_port, const struct sw_flow_key *,
144 const struct ofp_action *, int n_actions);
145 static void modify_vlan(struct buffer *buffer, const struct sw_flow_key *key,
146 const struct ofp_action *a);
147 static void modify_nh(struct buffer *buffer, uint16_t eth_proto,
148 uint8_t nw_proto, const struct ofp_action *a);
149 static void modify_th(struct buffer *buffer, uint16_t eth_proto,
150 uint8_t nw_proto, const struct ofp_action *a);
152 /* Buffers are identified to userspace by a 31-bit opaque ID. We divide the ID
153 * into a buffer number (low bits) and a cookie (high bits). The buffer number
154 * is an index into an array of buffers. The cookie distinguishes between
155 * different packets that have occupied a single buffer. Thus, the more
156 * buffers we have, the lower-quality the cookie... */
157 #define PKT_BUFFER_BITS 8
158 #define N_PKT_BUFFERS (1 << PKT_BUFFER_BITS)
159 #define PKT_BUFFER_MASK (N_PKT_BUFFERS - 1)
161 #define PKT_COOKIE_BITS (32 - PKT_BUFFER_BITS)
163 int run_flow_through_tables(struct datapath *, struct buffer *, int in_port);
164 void fwd_port_input(struct datapath *, struct buffer *, int in_port);
165 int fwd_control_input(struct datapath *, const struct sender *,
166 const void *, size_t);
168 uint32_t save_buffer(struct buffer *);
169 static struct buffer *retrieve_buffer(uint32_t id);
170 static void discard_buffer(uint32_t id);
/* Returns the port number of 'p', i.e. its index within 'dp->ports'.
 * Asserts that 'p' actually points into the 'dp->ports' array. */
172 static int port_no(struct datapath *dp, struct sw_port *p)
174     assert(p >= dp->ports && p < &dp->ports[ARRAY_SIZE(dp->ports)]);
175     return p - dp->ports;
178 /* Generates and returns a random datapath id. */
180 gen_datapath_id(void)
182     uint8_t ea[ETH_ADDR_LEN];
    /* NOTE(review): 'ea' must be filled with random bytes before the
     * conversion below; that fill is not visible in this view -- confirm. */
184     return eth_addr_to_uint64(ea);
/* Creates and initializes a new datapath, storing it in '*dp_'.  'rconn'
 * becomes the controller connection.  'dpid' is used as the datapath id if it
 * fits in 48 bits; otherwise a random id is generated. */
188 dp_new(struct datapath **dp_, uint64_t dpid, struct rconn *rconn)
192     dp = calloc(1, sizeof *dp);
197     dp->last_timeout = time_now();
198     list_init(&dp->remotes);
199     dp->controller = remote_create(dp, rconn);
200     dp->listen_vconn = NULL;
    /* Datapath ids are 48 bits wide; reject wider values. */
201     dp->id = dpid <= UINT64_C(0xffffffffffff) ? dpid : gen_datapath_id();
202     dp->chain = chain_create();
204         VLOG_ERR("could not create chain");
209     list_init(&dp->port_list);
211     dp->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
/* Opens network device 'name', brings it up in promiscuous mode, and adds it
 * to 'dp' as a switch port, announcing it to the controller with an
 * OFPPR_ADD port-status message.  Logs a warning if the device still has an
 * IPv4 or IPv6 address assigned (a switch port should not). */
217 dp_add_port(struct datapath *dp, const char *name)
219     struct netdev *netdev;
225     error = netdev_open(name, NETDEV_ETH_TYPE_ANY, &netdev);
229     error = netdev_set_flags(netdev, NETDEV_UP | NETDEV_PROMISC, false);
231         VLOG_ERR("Couldn't set promiscuous mode on %s device", name);
232         netdev_close(netdev);
    /* Warn about configured IP addresses: traffic to them would bypass the
     * switch's flow table. */
235     if (netdev_get_in4(netdev, &in4)) {
236         VLOG_ERR("%s device has assigned IP address %s", name, inet_ntoa(in4));
238     if (netdev_get_in6(netdev, &in6)) {
239         char in6_name[INET6_ADDRSTRLEN + 1];
240         inet_ntop(AF_INET6, &in6, in6_name, sizeof in6_name);
241         VLOG_ERR("%s device has assigned IPv6 address %s", name, in6_name);
    /* Claim the first free slot (netdev == NULL) in 'dp->ports'. */
244     for (p = dp->ports; ; p++) {
245         if (p >= &dp->ports[ARRAY_SIZE(dp->ports)]) {
247         } else if (!p->netdev) {
257     list_push_back(&dp->port_list, &p->node);
259     /* Notify the ctlpath that this port has been added */
260     send_port_status(p, OFPPR_ADD);
/* Registers 'listen_vconn' as the passive vconn on which 'dp' accepts new
 * remote connections.  May be called at most once per datapath. */
266 dp_add_listen_vconn(struct datapath *dp, struct vconn *listen_vconn)
268     assert(!dp->listen_vconn);
269     dp->listen_vconn = listen_vconn;
/* Performs one pass of periodic and I/O processing for 'dp': expires flows
 * (at most once per second), receives one packet from each port and runs it
 * through the flow table, services each remote connection, and accepts any
 * pending connection on the passive listen vconn. */
273 dp_run(struct datapath *dp)
275     time_t now = time_now();
276     struct sw_port *p, *pn;
277     struct remote *r, *rn;
278     struct buffer *buffer = NULL;
    /* Expire flows at most once a second. */
280     if (now != dp->last_timeout) {
281         struct list deleted = LIST_INITIALIZER(&deleted);
282         struct sw_flow *f, *n;
284         chain_timeout(dp->chain, &deleted);
285         LIST_FOR_EACH_SAFE (f, n, struct sw_flow, node, &deleted) {
286             send_flow_expired(dp, f, f->reason);
287             list_remove(&f->node);
290         dp->last_timeout = now;
292     poll_timer_wait(1000);
294     LIST_FOR_EACH_SAFE (p, pn, struct sw_port, node, &dp->port_list) {
298         /* Allocate buffer with some headroom to add headers in forwarding
299          * to the controller or adding a vlan tag, plus an extra 2 bytes to
300          * allow IP headers to be aligned on a 4-byte boundary. */
301         const int headroom = 128 + 2;
302         const int hard_header = VLAN_ETH_HEADER_LEN;
303         const int mtu = netdev_get_mtu(p->netdev);
304         buffer = buffer_new(headroom + hard_header + mtu);
305         buffer->data += headroom;
307         error = netdev_recv(p->netdev, buffer);
310             fwd_port_input(dp, buffer, port_no(dp, p));
312         } else if (error != EAGAIN) {
313             VLOG_ERR("Error receiving data from %s: %s",
314                      netdev_get_name(p->netdev), strerror(error));
318     buffer_delete(buffer);
320     /* Talk to remotes. */
321     LIST_FOR_EACH_SAFE (r, rn, struct remote, node, &dp->remotes) {
324     if (dp->listen_vconn) {
326         struct vconn *new_vconn;
329         retval = vconn_accept(dp->listen_vconn, &new_vconn);
331             if (retval != EAGAIN) {
332                 VLOG_WARN("accept failed (%s)", strerror(retval));
        /* Wrap the accepted vconn in an rconn and track it as a remote. */
336         remote_create(dp, rconn_new_from_vconn("passive", new_vconn));
/* Performs periodic processing for remote connection 'r': receives and
 * dispatches a bounded number of OpenFlow messages, and advances any
 * in-progress multi-message dump when the tx queue has room. */
342 remote_run(struct datapath *dp, struct remote *r)
348     /* Do some remote processing, but cap it at a reasonable amount so that
349      * other processing doesn't starve. */
350     for (i = 0; i < 50; i++) {
352         struct buffer *buffer;
353         struct ofp_header *oh;
355         buffer = rconn_recv(r->rconn);
        /* A valid message must at least contain a full OpenFlow header. */
360         if (buffer->size >= sizeof *oh) {
361             struct sender sender;
365             sender.xid = oh->xid;
366             fwd_control_input(dp, &sender, buffer->data, buffer->size);
368             VLOG_WARN("received too-short OpenFlow message");
370         buffer_delete(buffer);
        /* Continue a pending dump only while the tx queue has free slots. */
372         if (r->n_txq < TXQ_LIMIT) {
373             int error = r->cb_dump(dp, r->cb_aux);
376                 VLOG_WARN("dump callback error: %s", strerror(-error));
378                 r->cb_done(r->cb_aux);
387     if (!rconn_is_alive(r->rconn)) {
/* Registers poll-loop wakeup events for activity on 'r''s rconn. */
393 remote_wait(struct remote *r)
395     rconn_run_wait(r->rconn);
396     rconn_recv_wait(r->rconn);
/* Destroys remote connection 'r': finishes any in-progress dump via its
 * 'done' callback, unlinks 'r' from the datapath's remote list, and frees
 * its rconn. */
400 remote_destroy(struct remote *r)
    /* Give a pending dump a chance to release its resources. */
403     if (r->cb_dump && r->cb_done) {
404         r->cb_done(r->cb_aux);
406     list_remove(&r->node);
407     rconn_destroy(r->rconn);
/* Allocates a new remote for 'dp' wrapping 'rconn', links it into
 * 'dp->remotes', and returns it.  Takes ownership of 'rconn'. */
412 static struct remote *
413 remote_create(struct datapath *dp, struct rconn *rconn)
415     struct remote *remote = xmalloc(sizeof *remote);
416     list_push_back(&dp->remotes, &remote->node);
417     remote->rconn = rconn;
    /* No dump is in progress on a fresh remote. */
418     remote->cb_dump = NULL;
422 /* Starts a callback-based, reliable, possibly multi-message reply to a
423  * request made by 'remote'.
425  * 'dump' designates a function that will be called when the 'remote' send
426  * queue has an empty slot. It should compose a message and send it on
427  * 'remote'. On success, it should return 1 if it should be called again when
428  * another send queue slot opens up, 0 if its transmissions are complete, or a
429  * negative errno value on failure.
431  * 'done' designates a function to clean up any resources allocated for the
432  * dump. It must handle being called before the dump is complete (which will
433  * happen if 'remote' is closed unexpectedly).
435  * 'aux' is passed to 'dump' and 'done'. */
437 remote_start_dump(struct remote *remote,
438                   int (*dump)(struct datapath *, void *),
439                   void (*done)(void *),
    /* Only one dump may be in progress per remote at any time. */
442     assert(!remote->cb_dump);
443     remote->cb_dump = dump;
444     remote->cb_done = done;
445     remote->cb_aux = aux;
/* Registers poll-loop wakeup events for all of 'dp''s event sources:
 * port receive, remote connections, and the passive listen vconn. */
449 dp_wait(struct datapath *dp)
454     LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
455         netdev_recv_wait(p->netdev);
457     LIST_FOR_EACH (r, struct remote, node, &dp->remotes) {
460     if (dp->listen_vconn) {
461         vconn_accept_wait(dp->listen_vconn);
465 /* Delete 'p' from switch. */
467 del_switch_port(struct sw_port *p)
    /* Tell the controller first, while the port's netdev is still valid. */
469     send_port_status(p, OFPPR_DELETE);
470     netdev_close(p->netdev);
472     list_remove(&p->node);
/* Tears down datapath 'dp': deletes every switch port and destroys the
 * flow table chain. */
476 dp_destroy(struct datapath *dp)
478     struct sw_port *p, *n;
484     LIST_FOR_EACH_SAFE (p, n, struct sw_port, node, &dp->port_list) {
487     chain_destroy(dp->chain);
491 /* Send packets out all the ports except the originating one. If the
492  * "flood" argument is set, don't send out ports with flooding disabled.
495 output_all(struct datapath *dp, struct buffer *buffer, int in_port, int flood)
    /* 'prev_port' trails the iteration by one so the final transmission can
     * consume 'buffer' itself instead of a clone. */
501     LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
502         if (port_no(dp, p) == in_port) {
505         if (flood && p->flags & BRIDGE_PORT_NO_FLOOD) {
508         if (prev_port != -1) {
509             dp_output_port(dp, buffer_clone(buffer), in_port, prev_port);
511         prev_port = port_no(dp, p);
    /* Send the original buffer out the last eligible port (if any). */
514         dp_output_port(dp, buffer, in_port, prev_port);
516         buffer_delete(buffer);
/* Transmits 'buffer' on physical port 'out_port' of 'dp', taking ownership
 * of 'buffer'.  Drops (and logs) the packet if 'out_port' is invalid or has
 * no attached netdev. */
522 output_packet(struct datapath *dp, struct buffer *buffer, int out_port)
524     if (out_port >= 0 && out_port < OFPP_MAX) {
525         struct sw_port *p = &dp->ports[out_port];
526         if (p->netdev != NULL) {
527             if (!netdev_send(p->netdev, buffer)) {
536     buffer_delete(buffer);
537     /* FIXME: ratelimit */
538     VLOG_DBG("can't forward to bad port %d\n", out_port);
541 /* Takes ownership of 'buffer' and transmits it to 'out_port' on 'dp'.
544 dp_output_port(struct datapath *dp, struct buffer *buffer,
545                int in_port, int out_port)
    /* Dispatch on the virtual OpenFlow ports first; anything else is a
     * physical port handled by output_packet(). */
549     if (out_port == OFPP_FLOOD) {
550         output_all(dp, buffer, in_port, 1);
551     } else if (out_port == OFPP_ALL) {
552         output_all(dp, buffer, in_port, 0);
553     } else if (out_port == OFPP_CONTROLLER) {
554         dp_output_control(dp, buffer, in_port, 0, OFPR_ACTION);
555     } else if (out_port == OFPP_TABLE) {
556         if (run_flow_through_tables(dp, buffer, in_port)) {
557             buffer_delete(buffer);
560         output_packet(dp, buffer, out_port);
/* Allocates an OpenFlow message of 'openflow_len' bytes and type 'type' in
 * '*bufferp', echoing the transaction id from 'sender' (or 0 if 'sender' is
 * null) so the message is treated as a reply. */
565 make_openflow_reply(size_t openflow_len, uint8_t type,
566                     const struct sender *sender, struct buffer **bufferp)
568     return make_openflow_xid(openflow_len, type, sender ? sender->xid : 0,
/* Takes ownership of 'buffer' and queues it for transmission to the remote
 * that originated 'sender' (or to the controller if 'sender' is null).
 * Drops the message if the remote's tx queue is already full. */
573 send_openflow_buffer(struct datapath *dp, struct buffer *buffer,
574                      const struct sender *sender)
576     struct remote *remote = sender ? sender->remote : dp->controller;
577     struct rconn *rconn = remote->rconn;
    /* Fix up the OpenFlow header's length field before sending. */
580     update_openflow_length(buffer);
581     retval = (remote->n_txq < TXQ_LIMIT
582               ? rconn_send(rconn, buffer, &remote->n_txq)
585         VLOG_WARN("send to %s failed: %s",
586                   rconn_get_name(rconn), strerror(retval));
587         buffer_delete(buffer);
592 /* Takes ownership of 'buffer' and transmits it to 'dp''s controller. If the
593  * packet can be saved in a buffer, then only the first max_len bytes of
594  * 'buffer' are sent; otherwise, all of 'buffer' is sent. 'reason' indicates
595  * why 'buffer' is being sent. 'max_len' sets the maximum number of bytes that
596  * the caller wants to be sent; a value of 0 indicates the entire packet should
599 dp_output_control(struct datapath *dp, struct buffer *buffer, int in_port,
600                   size_t max_len, int reason)
602     struct ofp_packet_in *opi;
606     buffer_id = save_buffer(buffer);
607     total_len = buffer->size;
    /* Truncate the payload only if the full packet was buffered for later
     * retrieval by buffer_id. */
608     if (buffer_id != UINT32_MAX && buffer->size > max_len) {
609         buffer->size = max_len;
    /* Prepend the ofp_packet_in header in front of the (possibly truncated)
     * packet data. */
612     opi = buffer_push_uninit(buffer, offsetof(struct ofp_packet_in, data));
613     opi->header.version = OFP_VERSION;
614     opi->header.type = OFPT_PACKET_IN;
615     opi->header.length = htons(buffer->size);
616     opi->header.xid = htonl(0);
617     opi->buffer_id = htonl(buffer_id);
618     opi->total_len = htons(total_len);
619     opi->in_port = htons(in_port);
620     opi->reason = reason;
622     send_openflow_buffer(dp, buffer, NULL);
/* Fills '*desc' with a network-byte-order description of port 'p' of 'dp':
 * port number, device name, MAC address, flags, features, and speed. */
625 static void fill_port_desc(struct datapath *dp, struct sw_port *p,
626                            struct ofp_phy_port *desc)
628     desc->port_no = htons(port_no(dp, p));
629     strncpy((char *) desc->name, netdev_get_name(p->netdev),
    /* strncpy does not guarantee termination; force a trailing NUL. */
631     desc->name[sizeof desc->name - 1] = '\0';
632     memcpy(desc->hw_addr, netdev_get_etheraddr(p->netdev), ETH_ADDR_LEN);
633     desc->flags = htonl(p->flags);
634     desc->features = htonl(netdev_get_features(p->netdev));
635     desc->speed = htonl(netdev_get_speed(p->netdev));
/* Composes and sends an OFPT_FEATURES_REPLY describing 'dp' (datapath id,
 * table sizes, buffering, capabilities, actions) followed by one
 * ofp_phy_port record per switch port, addressed to 'sender'. */
639 dp_send_features_reply(struct datapath *dp, const struct sender *sender)
641     struct buffer *buffer;
642     struct ofp_switch_features *ofr;
645     ofr = make_openflow_reply(sizeof *ofr, OFPT_FEATURES_REPLY,
647     ofr->datapath_id  = htonll(dp->id);
648     ofr->n_exact      = htonl(2 * TABLE_HASH_MAX_FLOWS);
649     ofr->n_compression = 0;         /* Not supported */
650     ofr->n_general    = htonl(TABLE_LINEAR_MAX_FLOWS);
651     ofr->buffer_mb    = htonl(UINT32_MAX);
652     ofr->n_buffers    = htonl(N_PKT_BUFFERS);
653     ofr->capabilities = htonl(OFP_SUPPORTED_CAPABILITIES);
654     ofr->actions      = htonl(OFP_SUPPORTED_ACTIONS);
    /* Append a port description for every active switch port. */
655     LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
656         struct ofp_phy_port *opp = buffer_put_uninit(buffer, sizeof *opp);
657         memset(opp, 0, sizeof *opp);
658         fill_port_desc(dp, p, opp);
660     send_openflow_buffer(dp, buffer, sender);
/* Applies the port flags carried in '*opp' (an OFPT_PORT_MOD descriptor,
 * fields in network byte order) to the corresponding port of 'dp'.  The
 * update is ignored if the port number is out of range, the port slot is
 * unused, or the hardware address no longer matches (i.e. the message is
 * stale). */
664 dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp)
666     int port_no = ntohs(opp->port_no);
667     if (port_no < OFPP_MAX) {
668         struct sw_port *p = &dp->ports[port_no];
670         /* Make sure the port id hasn't changed since this was sent.
         * Check p->netdev (not p): 'p' always points into the array, but an
         * unused slot has a null netdev, which must not be dereferenced. */
671         if (!p->netdev || memcmp(opp->hw_addr, netdev_get_etheraddr(p->netdev),
672                          ETH_ADDR_LEN) != 0) {
        /* 'opp->flags' is network order; convert to host order for storage
         * (was htonl, which is the same bit operation but semantically
         * reversed). */
675         p->flags = ntohl(opp->flags);
/* Sends an OFPT_PORT_STATUS message for port 'p' with reason 'status'
 * (OFPPR_ADD, OFPPR_DELETE, ...) to the controller. */
680 send_port_status(struct sw_port *p, uint8_t status)
682     struct buffer *buffer;
683     struct ofp_port_status *ops;
684     ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &buffer);
685     ops->reason = status;
686     memset(ops->pad, 0, sizeof ops->pad);
687     fill_port_desc(p->dp, p, &ops->desc);
689     send_openflow_buffer(p->dp, buffer, NULL);
/* Notifies the controller with an OFPT_FLOW_EXPIRED message that 'flow' has
 * expired from 'dp''s tables for 'reason' (idle/hard timeout). */
693 send_flow_expired(struct datapath *dp, struct sw_flow *flow,
694                   enum ofp_flow_expired_reason reason)
696     struct buffer *buffer;
697     struct ofp_flow_expired *ofe;
698     ofe = make_openflow_xid(sizeof *ofe, OFPT_FLOW_EXPIRED, 0, &buffer);
699     flow_fill_match(&ofe->match, &flow->key);
701     ofe->priority = htons(flow->priority);
702     ofe->reason = reason;
703     memset(ofe->pad, 0, sizeof ofe->pad);
    /* Lifetime in seconds since the flow was installed. */
705     ofe->duration     = htonl(time_now() - flow->created);
706     memset(ofe->pad2, 0, sizeof ofe->pad2);
707     ofe->packet_count = htonll(flow->packet_count);
708     ofe->byte_count   = htonll(flow->byte_count);
709     send_openflow_buffer(dp, buffer, NULL);
/* Sends an OFPT_ERROR_MSG with the given 'type' and 'code' to 'sender',
 * attaching 'len' bytes of 'data' (normally the offending request) as the
 * error body. */
713 dp_send_error_msg(struct datapath *dp, const struct sender *sender,
714                   uint16_t type, uint16_t code, const uint8_t *data, size_t len)
716     struct buffer *buffer;
717     struct ofp_error_msg *oem;
718     oem = make_openflow_reply(sizeof(*oem)+len, OFPT_ERROR_MSG,
720     oem->type = htons(type);
721     oem->code = htons(code);
722     memcpy(oem->data, data, len);
723     send_openflow_buffer(dp, buffer, sender);
/* Appends an ofp_flow_stats record for 'flow' (which lives in table
 * 'table_idx') to 'buffer', converting all multibyte fields to network byte
 * order.  'now' is the current time, used to compute the flow duration. */
727 fill_flow_stats(struct buffer *buffer, struct sw_flow *flow,
728                 int table_idx, time_t now)
730     struct ofp_flow_stats *ofs;
    /* The record is variable-length: header plus one entry per action. */
731     int length = sizeof *ofs + sizeof *ofs->actions * flow->n_actions;
732     ofs = buffer_put_uninit(buffer, length);
733     ofs->length          = htons(length);
734     ofs->table_id        = table_idx;
    /* Flow-key fields other than 'wildcards' are already stored in network
     * byte order, so they are copied without conversion. */
736     ofs->match.wildcards = htons(flow->key.wildcards);
737     ofs->match.in_port   = flow->key.flow.in_port;
738     memcpy(ofs->match.dl_src, flow->key.flow.dl_src, ETH_ADDR_LEN);
739     memcpy(ofs->match.dl_dst, flow->key.flow.dl_dst, ETH_ADDR_LEN);
740     ofs->match.dl_vlan   = flow->key.flow.dl_vlan;
741     ofs->match.dl_type   = flow->key.flow.dl_type;
742     ofs->match.nw_src    = flow->key.flow.nw_src;
743     ofs->match.nw_dst    = flow->key.flow.nw_dst;
744     ofs->match.nw_proto  = flow->key.flow.nw_proto;
745     memset(ofs->match.pad, 0, sizeof ofs->match.pad);
746     ofs->match.tp_src    = flow->key.flow.tp_src;
747     ofs->match.tp_dst    = flow->key.flow.tp_dst;
748     ofs->duration        = htonl(now - flow->created);
749     ofs->priority        = htons(flow->priority);
750     ofs->idle_timeout    = htons(flow->idle_timeout);
751     ofs->hard_timeout    = htons(flow->hard_timeout);
752     memset(ofs->pad2, 0, sizeof ofs->pad2);
753     ofs->packet_count    = htonll(flow->packet_count);
754     ofs->byte_count      = htonll(flow->byte_count);
755     memcpy(ofs->actions, flow->actions,
756            sizeof *ofs->actions * flow->n_actions);
760 /* 'buffer' was received on 'in_port', a physical switch port between 0 and
761  * OFPP_MAX. Process it according to 'dp''s flow table. Returns 0 if
762  * successful, in which case 'buffer' is destroyed, or -ESRCH if there is no
763  * matching flow, in which case 'buffer' still belongs to the caller. */
764 int run_flow_through_tables(struct datapath *dp, struct buffer *buffer,
767     struct sw_flow_key key;
768     struct sw_flow *flow;
    /* flow_extract() returns nonzero for IP fragments; drop them when the
     * switch is configured with OFPC_FRAG_DROP. */
771     if (flow_extract(buffer, in_port, &key.flow)
772         && (dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
774         buffer_delete(buffer);
778     flow = chain_lookup(dp->chain, &key);
    /* Matched: update flow counters and run the flow's action list. */
780         flow_used(flow, buffer);
781         execute_actions(dp, buffer, in_port, &key,
782                         flow->actions, flow->n_actions);
789 /* 'buffer' was received on 'in_port', a physical switch port between 0 and
790  * OFPP_MAX. Process it according to 'dp''s flow table, sending it up to the
791  * controller if no flow matches. Takes ownership of 'buffer'. */
792 void fwd_port_input(struct datapath *dp, struct buffer *buffer, int in_port)
    /* No matching flow: forward (up to miss_send_len bytes of) the packet to
     * the controller as a packet-in. */
794     if (run_flow_through_tables(dp, buffer, in_port)) {
795         dp_output_control(dp, buffer, in_port, dp->miss_send_len,
/* Takes ownership of 'buffer' and sends it out 'out_port', or to the
 * controller (limited to 'max_len' bytes) when 'out_port' is
 * OFPP_CONTROLLER. */
801 do_output(struct datapath *dp, struct buffer *buffer, int in_port,
802           size_t max_len, int out_port)
804     if (out_port != OFPP_CONTROLLER) {
805         dp_output_port(dp, buffer, in_port, out_port);
807         dp_output_control(dp, buffer, in_port, max_len, OFPR_ACTION);
/* Applies the 'n_actions' actions in 'actions' to 'buffer' (received on
 * 'in_port', with extracted flow key 'key'), taking ownership of 'buffer'.
 * Field-modification actions edit the packet in place; output actions are
 * deferred one step so only intermediate outputs need a clone. */
812 execute_actions(struct datapath *dp, struct buffer *buffer,
813                 int in_port, const struct sw_flow_key *key,
814                 const struct ofp_action *actions, int n_actions)
816     /* Every output action needs a separate clone of 'buffer', but the common
817      * case is just a single output action, so that doing a clone and then
818      * freeing the original buffer is wasteful. So the following code is
819      * slightly obscure just to avoid that. */
821     size_t max_len=0;        /* Initialze to make compiler happy */
826     eth_proto = ntohs(key->flow.dl_type);
828     for (i = 0; i < n_actions; i++) {
829         const struct ofp_action *a = &actions[i];
830         struct eth_header *eh = buffer->l2;
        /* Flush the output deferred from the previous iteration, using a
         * clone because 'buffer' is still needed. */
832         if (prev_port != -1) {
833             do_output(dp, buffer_clone(buffer), in_port, max_len, prev_port);
837         switch (ntohs(a->type)) {
839             prev_port = ntohs(a->arg.output.port);
840             max_len = ntohs(a->arg.output.max_len);
843         case OFPAT_SET_DL_VLAN:
844             modify_vlan(buffer, key, a);
847         case OFPAT_SET_DL_SRC:
848             memcpy(eh->eth_src, a->arg.dl_addr, sizeof eh->eth_src);
851         case OFPAT_SET_DL_DST:
852             memcpy(eh->eth_dst, a->arg.dl_addr, sizeof eh->eth_dst);
855         case OFPAT_SET_NW_SRC:
856         case OFPAT_SET_NW_DST:
857             modify_nh(buffer, eth_proto, key->flow.nw_proto, a);
860         case OFPAT_SET_TP_SRC:
861         case OFPAT_SET_TP_DST:
862             modify_th(buffer, eth_proto, key->flow.nw_proto, a);
    /* Final output consumes 'buffer' itself; otherwise free it. */
870         do_output(dp, buffer, in_port, max_len, prev_port);
872         buffer_delete(buffer);
/* Rewrites the IPv4 source or destination address of the packet in 'buffer'
 * per action 'a' (OFPAT_SET_NW_SRC/DST), incrementally fixing up the IP
 * checksum and, for TCP/UDP, the transport pseudo-header checksum.
 * No-op for non-IP packets. */
875 static void modify_nh(struct buffer *buffer, uint16_t eth_proto,
876                       uint8_t nw_proto, const struct ofp_action *a)
878     if (eth_proto == ETH_TYPE_IP) {
879         struct ip_header *nh = buffer->l3;
880         uint32_t new, *field;
882         new = a->arg.nw_addr;
883         field = a->type == OFPAT_SET_NW_SRC ? &nh->ip_src : &nh->ip_dst;
        /* TCP/UDP checksums cover the IP addresses via the pseudo-header,
         * so they must be adjusted too. */
884         if (nw_proto == IP_TYPE_TCP) {
885             struct tcp_header *th = buffer->l4;
886             th->tcp_csum = recalc_csum32(th->tcp_csum, *field, new);
887         } else if (nw_proto == IP_TYPE_UDP) {
888             struct udp_header *th = buffer->l4;
890                 th->udp_csum = recalc_csum32(th->udp_csum, *field, new);
            /* A computed UDP checksum of 0 is transmitted as 0xffff. */
892                     th->udp_csum = 0xffff;
896         nh->ip_csum = recalc_csum32(nh->ip_csum, *field, new);
/* Rewrites the TCP or UDP source or destination port of the packet in
 * 'buffer' per action 'a' (OFPAT_SET_TP_SRC/DST), incrementally fixing up
 * the transport checksum.  No-op for non-IP or non-TCP/UDP packets. */
901 static void modify_th(struct buffer *buffer, uint16_t eth_proto,
902                       uint8_t nw_proto, const struct ofp_action *a)
904     if (eth_proto == ETH_TYPE_IP) {
905         uint16_t new, *field;
909         if (nw_proto == IP_TYPE_TCP) {
910             struct tcp_header *th = buffer->l4;
911             field = a->type == OFPAT_SET_TP_SRC ? &th->tcp_src : &th->tcp_dst;
912             th->tcp_csum = recalc_csum16(th->tcp_csum, *field, new);
914         } else if (nw_proto == IP_TYPE_UDP) {
915             struct udp_header *th = buffer->l4;
916             field = a->type == OFPAT_SET_TP_SRC ? &th->udp_src : &th->udp_dst;
917             th->udp_csum = recalc_csum16(th->udp_csum, *field, new);
924 modify_vlan(struct buffer *buffer,
925 const struct sw_flow_key *key, const struct ofp_action *a)
927 uint16_t new_id = a->arg.vlan_id;
928 struct vlan_eth_header *veh;
930 if (new_id != htons(OFP_VLAN_NONE)) {
931 if (key->flow.dl_vlan != htons(OFP_VLAN_NONE)) {
932 /* Modify vlan id, but maintain other TCI values */
934 veh->veth_tci &= ~htons(VLAN_VID);
935 veh->veth_tci |= new_id;
937 /* Insert new vlan id. */
938 struct eth_header *eh = buffer->l2;
939 struct vlan_eth_header tmp;
940 memcpy(tmp.veth_dst, eh->eth_dst, ETH_ADDR_LEN);
941 memcpy(tmp.veth_src, eh->eth_src, ETH_ADDR_LEN);
942 tmp.veth_type = htons(ETH_TYPE_VLAN);
943 tmp.veth_tci = new_id;
944 tmp.veth_next_type = eh->eth_type;
946 veh = buffer_push_uninit(buffer, VLAN_HEADER_LEN);
947 memcpy(veh, &tmp, sizeof tmp);
948 buffer->l2 -= VLAN_HEADER_LEN;
951 /* Remove an existing vlan header if it exists */
953 if (veh->veth_type == htons(ETH_TYPE_VLAN)) {
954 struct eth_header tmp;
956 memcpy(tmp.eth_dst, veh->veth_dst, ETH_ADDR_LEN);
957 memcpy(tmp.eth_src, veh->veth_src, ETH_ADDR_LEN);
958 tmp.eth_type = veh->veth_next_type;
960 buffer->size -= VLAN_HEADER_LEN;
961 buffer->data += VLAN_HEADER_LEN;
962 buffer->l2 += VLAN_HEADER_LEN;
963 memcpy(buffer->data, &tmp, sizeof tmp);
/* Handles an OFPT_FEATURES_REQUEST from 'sender' by replying with the
 * switch's feature description. */
969 recv_features_request(struct datapath *dp, const struct sender *sender,
972     dp_send_features_reply(dp, sender);
/* Handles an OFPT_GET_CONFIG_REQUEST by replying with the switch's current
 * configuration (flags and miss_send_len). */
977 recv_get_config_request(struct datapath *dp, const struct sender *sender,
980     struct buffer *buffer;
981     struct ofp_switch_config *osc;
983     osc = make_openflow_reply(sizeof *osc, OFPT_GET_CONFIG_REPLY,
986     osc->flags = htons(dp->flags);
987     osc->miss_send_len = htons(dp->miss_send_len);
989     return send_openflow_buffer(dp, buffer, sender);
/* Handles an OFPT_SET_CONFIG message 'msg': stores the supported subset of
 * the requested flags and the new miss_send_len into 'dp'. */
993 recv_set_config(struct datapath *dp, const struct sender *sender UNUSED,
996     const struct ofp_switch_config *osc = msg;
    /* Keep only the flag bits this implementation understands. */
999     flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
    /* Unsupported fragment-handling modes degrade to FRAG_DROP. */
1000     if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
1001         && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
1002         flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;
1005     dp->miss_send_len = ntohs(osc->miss_send_len);
/* Handles an OFPT_PACKET_OUT message 'msg'.  If buffer_id is -1 the packet
 * data is carried inline and is simply forwarded to 'out_port'; otherwise
 * the previously-buffered packet is retrieved and the attached action list
 * is executed on it. */
1010 recv_packet_out(struct datapath *dp, const struct sender *sender UNUSED,
1013     const struct ofp_packet_out *opo = msg;
1015     if (ntohl(opo->buffer_id) == (uint32_t) -1) {
1016         /* FIXME: can we avoid copying data here? */
1017         int data_len = ntohs(opo->header.length) - sizeof *opo;
1018         struct buffer *buffer = buffer_new(data_len);
1019         buffer_put(buffer, opo->u.data, data_len);
1020         dp_output_port(dp, buffer,
1021                        ntohs(opo->in_port), ntohs(opo->out_port));
1023         struct sw_flow_key key;
1024         struct buffer *buffer;
1027         buffer = retrieve_buffer(ntohl(opo->buffer_id));
        /* The message body after the fixed header is an action array. */
1032         n_acts = (ntohs(opo->header.length) - sizeof *opo)
1033             / sizeof *opo->u.actions;
1034         flow_extract(buffer, ntohs(opo->in_port), &key.flow);
1035         execute_actions(dp, buffer, ntohs(opo->in_port),
1036                         &key, opo->u.actions, n_acts);
/* Handles an OFPT_PORT_MOD message by applying the carried port flags. */
1042 recv_port_mod(struct datapath *dp, const struct sender *sender UNUSED,
1045     const struct ofp_port_mod *opm = msg;
1047     dp_update_port_flags(dp, &opm->desc);
/* Installs the flow described by OFPT_FLOW_MOD message 'ofm' into 'dp''s
 * flow table.  Rejects action lists that would output to OFPP_TABLE or
 * OFPP_NONE (loop prevention).  If the message references a buffered
 * packet, that packet is retrieved and run through the new flow's actions.
 * Returns 0 on success or a negative errno value. */
1053 add_flow(struct datapath *dp, const struct ofp_flow_mod *ofm)
1055     int error = -ENOMEM;
1058     struct sw_flow *flow;
1061     /* To prevent loops, make sure there's no action to send to the
1062      * OFP_TABLE virtual port.
1064     n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
1065     for (i=0; i<n_acts; i++) {
1066         const struct ofp_action *a = &ofm->actions[i];
1068         if (a->type == htons(OFPAT_OUTPUT)
1069             && (a->arg.output.port == htons(OFPP_TABLE)
1070                 || a->arg.output.port == htons(OFPP_NONE))) {
1071             /* xxx Send fancy new error message? */
1076     /* Allocate memory. */
1077     flow = flow_alloc(n_acts);
1081     /* Fill out flow. */
1082     flow_extract_match(&flow->key, &ofm->match);
    /* Exact-match flows get maximum priority; the controller's priority
     * only applies to wildcarded flows. */
1083     flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
1084     flow->idle_timeout = ntohs(ofm->idle_timeout);
1085     flow->hard_timeout = ntohs(ofm->hard_timeout);
1086     flow->used = flow->created = time_now();
1087     flow->n_actions = n_acts;
1088     flow->byte_count = 0;
1089     flow->packet_count = 0;
1090     memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);
1093     error = chain_insert(dp->chain, flow);
1095         goto error_free_flow;
    /* If the flow-mod referenced a buffered packet, replay it through the
     * newly-installed actions now. */
1098     if (ntohl(ofm->buffer_id) != UINT32_MAX) {
1099         struct buffer *buffer = retrieve_buffer(ntohl(ofm->buffer_id));
1101             struct sw_flow_key key;
1102             uint16_t in_port = ntohs(ofm->match.in_port);
1103             flow_used(flow, buffer);
1104             flow_extract(buffer, in_port, &key.flow);
1105             execute_actions(dp, buffer, in_port, &key, ofm->actions, n_acts);
    /* Error path: make sure any referenced buffered packet is released. */
1115     if (ntohl(ofm->buffer_id) != (uint32_t) -1)
1116         discard_buffer(ntohl(ofm->buffer_id));
/* Handles an OFPT_FLOW_MOD message 'msg', dispatching on its command:
 * ADD installs a flow; DELETE / DELETE_STRICT remove matching flows
 * (strict additionally matches priority exactly).  Returns 0 on success,
 * -ESRCH if a delete matched nothing. */
1121 recv_flow(struct datapath *dp, const struct sender *sender UNUSED,
1124     const struct ofp_flow_mod *ofm = msg;
1125     uint16_t command = ntohs(ofm->command);
1127     if (command == OFPFC_ADD) {
1128         return add_flow(dp, ofm);
1129     } else if (command == OFPFC_DELETE) {
1130         struct sw_flow_key key;
1131         flow_extract_match(&key, &ofm->match);
1132         return chain_delete(dp->chain, &key, 0, 0) ? 0 : -ESRCH;
1133     } else if (command == OFPFC_DELETE_STRICT) {
1134         struct sw_flow_key key;
1136         flow_extract_match(&key, &ofm->match);
        /* Mirror add_flow(): exact-match flows have priority -1. */
1137         priority = key.wildcards ? ntohs(ofm->priority) : -1;
1138         return chain_delete(dp->chain, &key, priority, 1) ? 0 : -ESRCH;
1144 struct flow_stats_state {
1146 struct sw_table_position position;
1147 struct ofp_flow_stats_request rq;
1150 struct buffer *buffer;
1153 #define MAX_FLOW_STATS_BYTES 4096
/* Initializes dump state for an OFPST_FLOW stats request: copies the
 * request out of 'body' and positions the iteration at the first requested
 * table (table_id 0xff means "all tables", so start at 0). */
1155 static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
1158     const struct ofp_flow_stats_request *fsr = body;
1159     struct flow_stats_state *s = xmalloc(sizeof *s);
1160     s->table_idx = fsr->table_id == 0xff ? 0 : fsr->table_id;
1161     memset(&s->position, 0, sizeof s->position);
/* Per-flow iteration callback: appends one flow-stats record to the reply
 * buffer; returns nonzero (stop iterating) once the buffer is full enough. */
1167 static int flow_stats_dump_callback(struct sw_flow *flow, void *private)
1169     struct flow_stats_state *s = private;
1170     fill_flow_stats(s->buffer, flow, s->table_idx, s->now);
1171     return s->buffer->size >= MAX_FLOW_STATS_BYTES;
/* Produces one batch of flow-stats records into 'buffer', resuming from the
 * saved table index and position.  Returns nonzero while more data remains
 * (the reply buffer filled up), zero when the dump is complete. */
1174 static int flow_stats_dump(struct datapath *dp, void *state,
1175                            struct buffer *buffer)
1177     struct flow_stats_state *s = state;
1178     struct sw_flow_key match_key;
1180     flow_extract_match(&match_key, &s->rq.match);
1182     s->now = time_now();
    /* Walk tables until done or until the callback reports a full buffer;
     * table_id 0xff means iterate over every table. */
1183     while (s->table_idx < dp->chain->n_tables
1184            && (s->rq.table_id == 0xff || s->rq.table_id == s->table_idx))
1186         struct sw_table *table = dp->chain->tables[s->table_idx];
1188         if (table->iterate(table, &match_key, &s->position,
1189                            flow_stats_dump_callback, s))
        /* Moving to the next table: restart its iteration position. */
1193         memset(&s->position, 0, sizeof s->position);
1195     return s->buffer->size >= MAX_FLOW_STATS_BYTES;
1198 static void flow_stats_done(void *state)
1203 struct aggregate_stats_state {
1204 struct ofp_aggregate_stats_request rq;
/* Initializes dump state for an OFPST_AGGREGATE stats request by copying
 * the request out of 'body'. */
1207 static int aggregate_stats_init(struct datapath *dp,
1208                                 const void *body, int body_len,
1211     const struct ofp_aggregate_stats_request *rq = body;
1212     struct aggregate_stats_state *s = xmalloc(sizeof *s);
/* Per-flow iteration callback: accumulates this flow's counters into the
 * aggregate reply (still in host byte order at this point). */
1218 static int aggregate_stats_dump_callback(struct sw_flow *flow, void *private)
1220     struct ofp_aggregate_stats_reply *rpy = private;
1221     rpy->packet_count += flow->packet_count;
1222     rpy->byte_count += flow->byte_count;
/* Produces the single-message OFPST_AGGREGATE reply: iterates every
 * matching flow in the requested table(s), summing counters, then converts
 * the totals to network byte order in place. */
1227 static int aggregate_stats_dump(struct datapath *dp, void *state,
1228                                 struct buffer *buffer)
1230     struct aggregate_stats_state *s = state;
1231     struct ofp_aggregate_stats_request *rq = &s->rq;
1232     struct ofp_aggregate_stats_reply *rpy;
1233     struct sw_table_position position;
1234     struct sw_flow_key match_key;
1237     rpy = buffer_put_uninit(buffer, sizeof *rpy);
1238     memset(rpy, 0, sizeof *rpy);
1240     flow_extract_match(&match_key, &rq->match);
    /* table_id 0xff means aggregate over every table. */
1241     table_idx = rq->table_id == 0xff ? 0 : rq->table_id;
1242     memset(&position, 0, sizeof position);
1243     while (table_idx < dp->chain->n_tables
1244            && (rq->table_id == 0xff || rq->table_id == table_idx))
1246         struct sw_table *table = dp->chain->tables[table_idx];
1249         error = table->iterate(table, &match_key, &position,
1250                                aggregate_stats_dump_callback, rpy);
1255         memset(&position, 0, sizeof position);
    /* Totals were accumulated in host order; convert for the wire. */
1258     rpy->packet_count = htonll(rpy->packet_count);
1259     rpy->byte_count = htonll(rpy->byte_count);
1260     rpy->flow_count = htonl(rpy->flow_count);
/* Cleanup hook for OFPST_AGGREGATE; body elided in this excerpt
 * (presumably frees the aggregate_stats_state -- confirm in full source). */
1264 static void aggregate_stats_done(void *state)
/* dump hook for OFPST_TABLE: emits one ofp_table_stats record per flow
 * table in the chain. Stateless -- the whole reply fits in one buffer. */
1269 static int table_stats_dump(struct datapath *dp, void *state,
1270 struct buffer *buffer)
1273 for (i = 0; i < dp->chain->n_tables; i++) {
1274 struct ofp_table_stats *ots = buffer_put_uninit(buffer, sizeof *ots);
1275 struct sw_table_stats stats;
1276 dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
/* NOTE(review): strncpy does not NUL-terminate when stats.name is as long
 * as ots->name. Harmless only if the receiver treats the field as a
 * fixed-width (possibly unterminated) name per the OpenFlow spec --
 * otherwise this should force ots->name[sizeof ots->name - 1] = '\0'. */
1277 strncpy(ots->name, stats.name, sizeof ots->name);
1279 memset(ots->pad, 0, sizeof ots->pad);
/* Counters converted to network byte order as they are written. */
1280 ots->max_entries = htonl(stats.max_flows);
1281 ots->active_count = htonl(stats.n_flows);
1282 ots->matched_count = htonll(stats.n_matched);
/* Per-request state for OFPST_PORT dumps; members (including the resume
 * index 'port' used by port_stats_dump) are elided from this excerpt. */
1287 struct port_stats_state {
/* init hook for OFPST_PORT: allocates the per-request state (initialization
 * of its members is in elided lines). xmalloc aborts on OOM. */
1291 static int port_stats_init(struct datapath *dp, const void *body, int body_len,
1294 struct port_stats_state *s = xmalloc(sizeof *s);
/* dump hook for OFPST_PORT: emits one ofp_port_stats record per switch
 * port, resuming from s->port if a previous call filled the buffer. */
1300 static int port_stats_dump(struct datapath *dp, void *state,
1301 struct buffer *buffer)
1303 struct port_stats_state *s = state;
1306 for (i = s->port; i < OFPP_MAX; i++) {
1307 struct sw_port *p = &dp->ports[i];
1308 struct ofp_port_stats *ops;
/* (Elided lines presumably skip unused port slots -- confirm.) */
1312 ops = buffer_put_uninit(buffer, sizeof *ops);
1313 ops->port_no = htons(port_no(dp, p));
1314 memset(ops->pad, 0, sizeof ops->pad);
/* Per-port counters, converted to network byte order on the way out. */
1315 ops->rx_count = htonll(p->rx_count);
1316 ops->tx_count = htonll(p->tx_count);
1317 ops->drop_count = htonll(p->drop_count);
/* Cleanup hook for OFPST_PORT; body elided in this excerpt
 * (presumably frees the port_stats_state -- confirm in full source). */
1324 static void port_stats_done(void *state)
/* Members of struct stats_type (the declaration line itself is elided):
 * one entry per OFPST_* statistics kind, giving body-length bounds and
 * the init/dump/done lifecycle hooks used by recv_stats_request. */
1330 /* Minimum and maximum acceptable number of bytes in body member of
1331 * struct ofp_stats_request. */
1332 size_t min_body, max_body;
1334 /* Prepares to dump some kind of statistics on 'dp'. 'body' and
1335 * 'body_len' are the 'body' member of the struct ofp_stats_request.
1336 * Returns zero if successful, otherwise a negative error code.
1337 * May initialize '*state' to state information. May be null if no
1338 * initialization is required.*/
1339 int (*init)(struct datapath *dp, const void *body, int body_len,
1342 /* Appends statistics for 'dp' to 'buffer', which initially contains a
1343 * struct ofp_stats_reply. On success, it should return 1 if it should be
1344 * called again later with another buffer, 0 if it is done, or a negative
1345 * errno value on failure. */
1346 int (*dump)(struct datapath *dp, void *state, struct buffer *buffer);
1348 /* Cleans any state created by the init or dump functions. May be null
1349 * if no cleanup is required. */
1350 void (*done)(void *state);
/* Dispatch table indexed by OFPST_* request type. Each entry supplies
 * {min_body, max_body, init, dump, done}; some entries and fields are
 * elided from this excerpt. recv_stats_request checks stats[type].dump
 * to reject unknown types. */
1353 static const struct stats_type stats[] = {
1355 sizeof(struct ofp_flow_stats_request),
1356 sizeof(struct ofp_flow_stats_request),
/* Aggregate stats: body must be exactly one ofp_aggregate_stats_request. */
1361 [OFPST_AGGREGATE] = {
1362 sizeof(struct ofp_aggregate_stats_request),
1363 sizeof(struct ofp_aggregate_stats_request),
1364 aggregate_stats_init,
1365 aggregate_stats_dump,
1366 aggregate_stats_done
/* Context for an in-progress stats dump handed to remote_start_dump():
 * a private copy of the request, the requester's identity for replies,
 * and the stats_type entry driving the dump. (The per-type 'state'
 * member referenced elsewhere is elided here.) */
1384 struct stats_dump_cb {
1386 struct ofp_stats_request *rq;
1387 struct sender sender;
1388 const struct stats_type *s;
/* Dump-loop callback: builds one OFPT_STATS_REPLY, lets the per-type
 * dump hook fill it, and sends it; a positive return from dump means
 * more replies follow, signalled with OFPSF_REPLY_MORE. */
1393 stats_dump(struct datapath *dp, void *cb_)
1395 struct stats_dump_cb *cb = cb_;
1396 struct ofp_stats_reply *osr;
1397 struct buffer *buffer;
1404 osr = make_openflow_reply(sizeof *osr, OFPT_STATS_REPLY, &cb->sender,
/* The reply type is the index of the stats_type entry within stats[]. */
1406 osr->type = htons(cb->s - stats);
1409 err = cb->s->dump(dp, cb->state, buffer);
1415 /* Buffer might have been reallocated, so find our data again. */
1416 osr = buffer_at_assert(buffer, 0, sizeof *osr);
/* NOTE(review): ntohs here should logically be htons (host->network);
 * the two are the same operation on every supported platform, so the
 * behavior is correct, but htons would state the intent. */
1417 osr->flags = ntohs(OFPSF_REPLY_MORE);
1419 err2 = send_openflow_buffer(dp, buffer, &cb->sender);
/* Teardown callback for a finished/aborted dump: invokes the per-type
 * done hook (freeing of cb->rq and cb itself is in elided lines --
 * confirm; also note done may be null for types without cleanup, with
 * the null check presumably on an elided line). */
1429 stats_done(void *cb_)
1431 struct stats_dump_cb *cb = cb_;
1434 cb->s->done(cb->state);
/* Handler for OFPT_STATS_REQUEST: validates the type and body length,
 * sets up a stats_dump_cb, runs the per-type init hook, and registers
 * the dump with the remote's dump machinery so replies are produced
 * incrementally as the connection drains. */
1441 recv_stats_request(struct datapath *dp, const struct sender *sender,
1444 const struct ofp_stats_request *rq = oh;
1445 size_t rq_len = ntohs(rq->header.length);
1446 struct stats_dump_cb *cb;
1450 type = ntohs(rq->type);
/* Reject types outside the table or with no dump hook registered. */
1451 if (type >= ARRAY_SIZE(stats) || !stats[type].dump) {
1452 VLOG_WARN("received stats request of unknown type %d", type);
1456 cb = xmalloc(sizeof *cb);
/* Keep a private copy of the request: the caller's buffer does not
 * outlive this handler, but the dump runs asynchronously. */
1458 cb->rq = xmemdup(rq, rq_len);
1459 cb->sender = *sender;
1460 cb->s = &stats[type];
/* Enforce the per-type body-length bounds from the stats[] table. */
1463 body_len = rq_len - offsetof(struct ofp_stats_request, body);
1464 if (body_len < cb->s->min_body || body_len > cb->s->max_body) {
1465 VLOG_WARN("stats request type %d with bad body length %d",
/* init may allocate per-request state into cb->state; negative return
 * is an error code (hence strerror(-err)). */
1472 err = cb->s->init(dp, rq->body, body_len, &cb->state);
1474 VLOG_WARN("failed initialization of stats request type %d: %s",
1475 type, strerror(-err));
1480 remote_start_dump(sender->remote, stats_dump, stats_done, cb);
/* Handler for OFPT_ECHO_REQUEST: reply immediately, echoing the payload. */
1490 recv_echo_request(struct datapath *dp, const struct sender *sender,
1493 return send_openflow_buffer(dp, make_echo_reply(oh), sender);
/* Handler for OFPT_ECHO_REPLY: intentionally a no-op (body elided);
 * a reply to our own echo needs no processing. */
1497 recv_echo_reply(struct datapath *dp UNUSED, const struct sender *sender UNUSED,
1498 const void *oh UNUSED)
1503 /* 'msg', which is 'length' bytes long, was received from the control path.
1504 * Apply it to 'chain'. */
/* Dispatcher for incoming OpenFlow messages: looks up the handler for
 * oh->type in a static table, checks minimum length, and forwards. */
1506 fwd_control_input(struct datapath *dp, const struct sender *sender,
1507 const void *msg, size_t length)
/* Table entry: minimum acceptable message size plus the handler. */
1509 struct openflow_packet {
1511 int (*handler)(struct datapath *, const struct sender *, const void *);
/* Designated-initializer table indexed by OFPT_* message type; several
 * handler-name lines are elided in this excerpt. */
1514 static const struct openflow_packet packets[] = {
1515 [OFPT_FEATURES_REQUEST] = {
1516 sizeof (struct ofp_header),
1517 recv_features_request,
1519 [OFPT_GET_CONFIG_REQUEST] = {
1520 sizeof (struct ofp_header),
1521 recv_get_config_request,
1523 [OFPT_SET_CONFIG] = {
1524 sizeof (struct ofp_switch_config),
1527 [OFPT_PACKET_OUT] = {
1528 sizeof (struct ofp_packet_out),
1532 sizeof (struct ofp_flow_mod),
1536 sizeof (struct ofp_port_mod),
1539 [OFPT_STATS_REQUEST] = {
1540 sizeof (struct ofp_stats_request),
1543 [OFPT_ECHO_REQUEST] = {
1544 sizeof (struct ofp_header),
1547 [OFPT_ECHO_REPLY] = {
1548 sizeof (struct ofp_header),
1553 const struct openflow_packet *pkt;
1554 struct ofp_header *oh;
1556 oh = (struct ofp_header *) msg;
/* Version negotiation happened earlier; a mismatch here is a programming
 * error, hence assert rather than a protocol error reply. */
1557 assert(oh->version == OFP_VERSION);
/* Reject unknown types and messages whose claimed length exceeds what
 * was actually received. */
1558 if (oh->type >= ARRAY_SIZE(packets) || ntohs(oh->length) > length)
1561 pkt = &packets[oh->type];
/* Each handler may assume at least min_size bytes are present. */
1564 if (length < pkt->min_size)
1567 return pkt->handler(dp, sender, msg);
1570 /* Packet buffering. */
/* An existing buffer entry may be overwritten once it is this many
 * seconds old (see save_buffer). */
1572 #define OVERWRITE_SECS 1
/* One slot in the ring of buffered packets awaiting a controller
 * decision; the cookie/timeout members are elided in this excerpt. */
1574 struct packet_buffer {
1575 struct buffer *buffer;
/* Ring of buffered packets plus the index of the most recent slot used;
 * buffer ids encode slot index in the low bits and cookie above them. */
1580 static struct packet_buffer buffers[N_PKT_BUFFERS];
1581 static unsigned int buffer_idx;
/* Stores a copy of 'buffer' in the ring and returns the buffer id
 * (slot index | cookie << PKT_BUFFER_BITS) that a controller can later
 * use to retrieve or discard it. (Error-return path for a too-fresh
 * occupied slot is on elided lines.) */
1583 uint32_t save_buffer(struct buffer *buffer)
1585 struct packet_buffer *p;
/* Advance round-robin to the next slot. */
1588 buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
1589 p = &buffers[buffer_idx];
1591 /* Don't buffer packet if existing entry is less than
1592 * OVERWRITE_SECS old. */
1593 if (time_now() < p->timeout) { /* FIXME */
/* Slot is old enough: drop the previously buffered packet. */
1596 buffer_delete(p->buffer);
1599 /* Don't use maximum cookie value since the all-bits-1 id is
/* (continuation of the comment above is elided) -- the all-ones id is
 * reserved, presumably as the "no buffer" sentinel; cookie wraps early
 * to avoid ever producing it. */
1601 if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
1603 p->buffer = buffer_clone(buffer); /* FIXME */
1604 p->timeout = time_now() + OVERWRITE_SECS; /* FIXME */
1605 id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
/* Looks up the buffered packet for 'id'; the low bits select the slot
 * and the high bits must match the slot's current cookie (guarding
 * against stale ids after the slot was reused). Returns NULL on
 * mismatch (return path elided). */
1610 static struct buffer *retrieve_buffer(uint32_t id)
1612 struct buffer *buffer = NULL;
1613 struct packet_buffer *p;
1615 p = &buffers[id & PKT_BUFFER_MASK];
1616 if (p->cookie == id >> PKT_BUFFER_BITS) {
/* NOTE(review): debug printf on the mismatch path writes straight to
 * stdout; the rest of the file uses VLOG_* -- consider VLOG_DBG. */
1620 printf("cookie mismatch: %x != %x\n",
1621 id >> PKT_BUFFER_BITS, p->cookie);
/* Frees the buffered packet for 'id' if the cookie still matches
 * (same id scheme as retrieve_buffer); silently ignores stale ids.
 * Tail of the function is outside this excerpt. */
1627 static void discard_buffer(uint32_t id)
1629 struct packet_buffer *p;
1631 p = &buffers[id & PKT_BUFFER_MASK];
1632 if (p->cookie == id >> PKT_BUFFER_BITS) {
1633 buffer_delete(p->buffer);