2 * Distributed under the terms of the GNU GPL version 2.
3 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
4 * Stanford Junior University
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
11 #include <linux/tcp.h>
12 #include <linux/udp.h>
13 #include <linux/in6.h>
14 #include <asm/uaccess.h>
15 #include <linux/types.h>
16 #include <net/checksum.h>
22 /* FIXME: do we need to use GFP_ATOMIC everywhere here? */
24 static int make_writable(struct sk_buff **);
26 static struct sk_buff *retrieve_skb(uint32_t id);
27 static void discard_skb(uint32_t id);
29 /* 'skb' was received on 'in_port', a physical switch port between 0 and
30 * OFPP_MAX. Process it according to 'chain'. Returns 0 if successful, in
31 * which case 'skb' is destroyed, or -ESRCH if there is no matching flow, in
32 * which case 'skb' still belongs to the caller. */
33 int run_flow_through_tables(struct sw_chain *chain, struct sk_buff *skb,
36 	struct sw_flow_key key;
	/* Drop the packet early when the datapath is configured with
	 * OFPC_FRAG_DROP.  NOTE(review): presumably flow_extract() returns
	 * nonzero for IP fragments — confirm against flow_extract(). */
39 	if (flow_extract(skb, in_port, &key)
40 	    && (chain->dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
	/* Look the extracted key up in the flow table chain; on a hit,
	 * apply that flow's action list to the packet. */
46 	flow = chain_lookup(chain, &key);
47 	if (likely(flow != NULL)) {
49 		execute_actions(chain->dp, skb, &key,
50 				flow->actions, flow->n_actions);
57 /* 'skb' was received on 'in_port', a physical switch port between 0 and
58 * OFPP_MAX. Process it according to 'chain', sending it up to the controller
59 * if no flow matches. Takes ownership of 'skb'. */
60 void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb, int in_port)
	/* No flow matched (nonzero return): buffer the packet via
	 * fwd_save_skb() and hand it to the controller, truncated to the
	 * datapath's configured miss_send_len. */
62 	if (run_flow_through_tables(chain, skb, in_port))
63 		dp_output_control(chain->dp, skb, fwd_save_skb(skb),
64 				  chain->dp->miss_send_len,
68 static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
	/* Transmit on a switch port, or — when the destination is
	 * OFPP_CONTROLLER — buffer the packet (fwd_save_skb) and send it up
	 * as an OFPR_ACTION packet-in truncated to 'max_len'. */
73 	return (likely(out_port != OFPP_CONTROLLER)
74 		? dp_output_port(dp, skb, out_port)
75 		: dp_output_control(dp, skb, fwd_save_skb(skb),
76 				    max_len, OFPR_ACTION));
79 void execute_actions(struct datapath *dp, struct sk_buff *skb,
80 		     const struct sw_flow_key *key,
81 		     const struct ofp_action *actions, int n_actions)
83 	/* Every output action needs a separate clone of 'skb', but the common
84 	 * case is just a single output action, so that doing a clone and
85 	 * then freeing the original skbuff is wasteful.  So the following code
86 	 * is slightly obscure just to avoid that.  An OFPAT_OUTPUT action is
	 * recorded in 'prev_port'/'max_len' and only emitted on the NEXT
	 * iteration (with a clone) or after the loop (with the original skb). */
88 	size_t max_len=0;	 /* Initialize to make compiler happy */
93 	eth_proto = ntohs(key->dl_type);
95 	for (i = 0; i < n_actions; i++) {
96 		const struct ofp_action *a = &actions[i];
		/* Flush a pending output action using a clone, keeping the
		 * original skb for the remaining actions. */
98 		if (prev_port != -1) {
99 			do_output(dp, skb_clone(skb, GFP_ATOMIC),
104 		if (likely(a->type == htons(OFPAT_OUTPUT))) {
105 			prev_port = ntohs(a->arg.output.port);
106 			max_len = ntohs(a->arg.output.max_len);
			/* Field-modifying action: the skb must be writable
			 * (unshared, unclcloned) before a setter touches it. */
108 			if (!make_writable(&skb)) {
110 				printk("make_writable failed\n");
113 			skb = execute_setter(skb, eth_proto, key, a);
			/* NOTE(review): elided branch appears to bail out if
			 * the setter consumed the skb — confirm. */
116 				printk("execute_setter lost skb\n");
	/* Emit the final deferred output with the original (unclonied) skb. */
122 	do_output(dp, skb, max_len, prev_port);
127 /* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
128 * covered by the sum has been changed from 'from' to 'to'. If set,
129 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
130 * Based on nf_proto_csum_replace4. */
131 static void update_csum(__sum16 *sum, struct sk_buff *skb,
132 			__be32 from, __be32 to, int pseudohdr)
	/* Incremental checksum update: folding {~from, to} into the sum is
	 * equivalent to removing 'from' and adding 'to'. */
134 	__be32 diff[] = { ~from, to };
135 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
136 		*sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
137 					      ~csum_unfold(*sum)));
		/* CHECKSUM_COMPLETE: skb->csum covers the payload, so a
		 * pseudo-header field change must be reflected there too. */
138 		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
139 			skb->csum = ~csum_partial((char *)diff, sizeof(diff),
	/* CHECKSUM_PARTIAL: hardware fills in the final sum; only the
	 * pseudo-header seed needs adjusting. */
141 	} else if (pseudohdr)
142 		*sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
	/* Rewrite the IPv4 source or destination address named by action 'a',
	 * fixing up the TCP/UDP pseudo-header checksum and the IP header
	 * checksum.  Non-IP packets are left untouched. */
146 static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
147 		      uint8_t nw_proto, const struct ofp_action *a)
149 	if (eth_proto == ETH_P_IP) {
150 		struct iphdr *nh = ip_hdr(skb);
151 		uint32_t new, *field;
153 		new = a->arg.nw_addr;
		/* Pick saddr vs. daddr based on the action type.  NOTE(review):
		 * the assignment lines are elided in this view — confirm. */
155 		if (a->type == htons(OFPAT_SET_NW_SRC))
		/* The address is part of the L4 pseudo-header, so the TCP/UDP
		 * checksum must be updated as well (pseudohdr = 1). */
160 		if (nw_proto == IPPROTO_TCP) {
161 			struct tcphdr *th = tcp_hdr(skb);
162 			update_csum(&th->check, skb, *field, new, 1);
163 		} else if (nw_proto == IPPROTO_UDP) {
164 			struct udphdr *th = udp_hdr(skb);
165 			update_csum(&th->check, skb, *field, new, 1);
		/* IP header checksum covers the address directly (no pseudo-
		 * header semantics). */
167 		update_csum(&nh->check, skb, *field, new, 0);
	/* Rewrite the TCP or UDP source/destination port named by action 'a',
	 * updating the transport checksum in place.  Only IPv4 TCP/UDP
	 * packets are modified. */
172 static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
173 		      uint8_t nw_proto, const struct ofp_action *a)
175 	if (eth_proto == ETH_P_IP) {
176 		uint16_t new, *field;
180 		if (nw_proto == IPPROTO_TCP) {
181 			struct tcphdr *th = tcp_hdr(skb);
			/* Select th->source vs. th->dest.  NOTE(review): the
			 * assignments are elided in this view — confirm. */
183 			if (a->type == htons(OFPAT_SET_TP_SRC))
188 			update_csum(&th->check, skb, *field, new, 1);
190 		} else if (nw_proto == IPPROTO_UDP) {
191 			struct udphdr *th = udp_hdr(skb);
193 			if (a->type == htons(OFPAT_SET_TP_SRC))
198 			update_csum(&th->check, skb, *field, new, 1);
	/* Strip the 802.1Q tag from 'skb' in place: slide the two MAC
	 * addresses over the tag, pull VLAN_HLEN bytes, and restore the
	 * inner ethertype as skb->protocol. */
204 static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
206 	struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
210 	/* Verify we were given a vlan packet */
211 	if (vh->h_vlan_proto != htons(ETH_P_8021Q))
	/* Move the src/dst MAC pair (2 * VLAN_ETH_ALEN bytes) forward over
	 * the 4-byte tag; regions overlap, hence memmove. */
214 	memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);
216 	eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
218 	skb->protocol = eh->h_proto;
219 	skb->mac_header += VLAN_HLEN;
	/* Apply an OFPAT_SET_DL_VLAN action: rewrite the VID of an existing
	 * tag, add a fresh tag, or (for OFP_VLAN_NONE) strip the tag.  May
	 * reallocate and return a different skb. */
224 static struct sk_buff *modify_vlan(struct sk_buff *skb,
225 		const struct sw_flow_key *key, const struct ofp_action *a)
227 	uint16_t new_id = ntohs(a->arg.vlan_id);
229 	if (new_id != OFP_VLAN_NONE) {
230 		if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
231 			/* Modify vlan id, but maintain other TCI values
			 * (PCP/CFI bits outside VLAN_VID_MASK). */
232 			struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
233 			vh->h_vlan_TCI = (vh->h_vlan_TCI
234 				& ~(htons(VLAN_VID_MASK))) | a->arg.vlan_id;
236 			/* Add vlan header */
238 			/* xxx The vlan_put_tag function, doesn't seem to work
239 			 * xxx reliably when it attempts to use the hardware-accelerated
240 			 * xxx version.  We'll directly use the software version
241 			 * xxx until the problem can be diagnosed.
243 			skb = __vlan_put_tag(skb, new_id);
246 		/* Remove an existing vlan header if it exists */
	/* Dispatch a single field-modifying OpenFlow action against 'skb'.
	 * Returns the (possibly reallocated) skb; 'key' supplies the parsed
	 * flow fields (e.g. nw_proto) the setters need. */
253 struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
254 		const struct sw_flow_key *key, const struct ofp_action *a)
256 	switch (ntohs(a->type)) {
257 	case OFPAT_SET_DL_VLAN:
		/* May replace skb (tag insertion can reallocate). */
258 		skb = modify_vlan(skb, key, a);
261 	case OFPAT_SET_DL_SRC: {
262 		struct ethhdr *eh = eth_hdr(skb);
263 		memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
266 	case OFPAT_SET_DL_DST: {
267 		struct ethhdr *eh = eth_hdr(skb);
268 		memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
	/* Network- and transport-layer setters handle their own checksums. */
272 	case OFPAT_SET_NW_SRC:
273 	case OFPAT_SET_NW_DST:
274 		modify_nh(skb, eth_proto, key->nw_proto, a);
277 	case OFPAT_SET_TP_SRC:
278 	case OFPAT_SET_TP_DST:
279 		modify_th(skb, eth_proto, key->nw_proto, a);
284 		printk("execute_setter: unknown action: %d\n", ntohs(a->type));
	/* OFPT_FEATURES_REQUEST handler: reply with the switch's features. */
291 recv_features_request(struct sw_chain *chain, const struct sender *sender,
294 	return dp_send_features_reply(chain->dp, sender);
	/* OFPT_GET_CONFIG_REQUEST handler: reply with the current config. */
298 recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
301 	return dp_send_config_reply(chain->dp, sender);
	/* OFPT_SET_CONFIG handler: adopt the controller-supplied flags and
	 * packet-in truncation length. */
305 recv_set_config(struct sw_chain *chain, const struct sender *sender,
308 	const struct ofp_switch_config *osc = msg;
310 	chain->dp->flags = ntohs(osc->flags);
311 	chain->dp->miss_send_len = ntohs(osc->miss_send_len);
	/* OFPT_PACKET_OUT handler.  Two cases: buffer_id == -1 means the
	 * packet data is carried inline in the message and is transmitted on
	 * out_port; otherwise a previously buffered skb is retrieved and the
	 * message's action list is executed on it. */
317 recv_packet_out(struct sw_chain *chain, const struct sender *sender,
320 	const struct ofp_packet_out *opo = msg;
322 	struct vlan_ethhdr *mac;
325 	if (ntohl(opo->buffer_id) == (uint32_t) -1) {
326 		int data_len = ntohs(opo->header.length) - sizeof *opo;
328 		/* FIXME: there is likely a way to reuse the data in msg. */
329 		skb = alloc_skb(data_len, GFP_ATOMIC);
333 		/* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
334 		 * we're just transmitting this raw without examining anything
335 		 * at those layers. */
336 		memcpy(skb_put(skb, data_len), opo->u.data, data_len);
337 		dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
		/* Point the network header past the Ethernet header, widened
		 * by the tag if the frame is 802.1Q. */
339 		skb_set_mac_header(skb, 0);
340 		mac = vlan_eth_hdr(skb);
341 		if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
342 			nh_ofs = sizeof(struct ethhdr);
344 			nh_ofs = sizeof(struct vlan_ethhdr);
345 		skb_set_network_header(skb, nh_ofs);
347 		dp_output_port(chain->dp, skb, ntohs(opo->out_port));
349 		struct sw_flow_key key;
		/* Buffered case: fetch the stored skb and run the message's
		 * trailing actions on it. */
352 		skb = retrieve_skb(ntohl(opo->buffer_id));
355 		dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
		/* Number of actions = bytes after the fixed header / action size. */
357 		n_acts = (ntohs(opo->header.length) - sizeof *opo)
358 			/ sizeof *opo->u.actions;
359 		flow_extract(skb, ntohs(opo->in_port), &key);
360 		execute_actions(chain->dp, skb, &key, opo->u.actions, n_acts);
366 recv_port_mod(struct sw_chain *chain, const struct sender *sender,
369 const struct ofp_port_mod *opm = msg;
371 dp_update_port_flags(chain->dp, &opm->desc);
	/* OFPT_ECHO_REQUEST handler: echo the message back to the sender. */
377 recv_echo_request(struct sw_chain *chain, const struct sender *sender,
380 	return dp_send_echo_reply(chain->dp, sender, msg);
384 recv_echo_reply(struct sw_chain *chain, const struct sender *sender,
	/* Install the flow described by OFPFC_ADD message 'ofm' into 'chain'.
	 * If the message references a buffered packet, run the new flow's
	 * actions on it immediately. */
391 add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
396 	struct sw_flow *flow;
399 	/* To prevent loops, make sure there's no action to send to the
400 	 * OFP_TABLE virtual port.
402 	n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
403 	for (i=0; i<n_acts; i++) {
404 		const struct ofp_action *a = &ofm->actions[i];
406 		if (a->type == htons(OFPAT_OUTPUT)
407 		    && (a->arg.output.port == htons(OFPP_TABLE)
408 			|| a->arg.output.port == htons(OFPP_NONE))) {
409 			/* xxx Send fancy new error message? */
414 	/* Allocate memory. */
415 	flow = flow_alloc(n_acts, GFP_ATOMIC);
	/* Fill out the flow from the message.  Exact-match flows get
	 * priority -1 (highest when compared as unsigned); wildcarded flows
	 * use the controller-supplied priority. */
420 	flow_extract_match(&flow->key, &ofm->match);
421 	flow->max_idle = ntohs(ofm->max_idle);
422 	flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
423 	flow->timeout = jiffies + flow->max_idle * HZ;
424 	flow->n_actions = n_acts;
425 	flow->init_time = jiffies;
426 	flow->byte_count = 0;
427 	flow->packet_count = 0;
428 	spin_lock_init(&flow->lock);
429 	memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);
432 	error = chain_insert(chain, flow);
434 		goto error_free_flow;
	/* A non-(-1) buffer_id names a packet parked by fwd_save_skb():
	 * account for it against the new flow and execute its actions. */
436 	if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
437 		struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
439 			struct sw_flow_key key;
440 			flow_used(flow, skb);
441 			flow_extract(skb, ntohs(ofm->match.in_port), &key);
442 			execute_actions(chain->dp, skb, &key,
443 					ofm->actions, n_acts);
	/* Error path: drop any referenced buffered packet so its slot can
	 * be reused. */
453 	if (ntohl(ofm->buffer_id) != (uint32_t) -1)
454 		discard_skb(ntohl(ofm->buffer_id));
	/* OFPT_FLOW_MOD handler: dispatch on the flow-mod command.  DELETE
	 * matches loosely; DELETE_STRICT also requires priority equality. */
459 recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
461 	const struct ofp_flow_mod *ofm = msg;
462 	uint16_t command = ntohs(ofm->command);
464 	if (command == OFPFC_ADD) {
465 		return add_flow(chain, ofm);
466 	} else if (command == OFPFC_DELETE) {
467 		struct sw_flow_key key;
468 		flow_extract_match(&key, &ofm->match);
469 		return chain_delete(chain, &key, 0, 0) ? 0 : -ESRCH;
470 	} else if (command == OFPFC_DELETE_STRICT) {
471 		struct sw_flow_key key;
		/* Mirror add_flow(): exact-match entries were stored with
		 * priority -1, so reconstruct that here for strict delete. */
473 		flow_extract_match(&key, &ofm->match);
474 		priority = key.wildcards ? ntohs(ofm->priority) : -1;
475 		return chain_delete(chain, &key, priority, 1) ? 0 : -ESRCH;
481 /* 'msg', which is 'length' bytes long, was received across Netlink from
482 * 'sender'. Apply it to 'chain'. */
484 fwd_control_input(struct sw_chain *chain, const struct sender *sender,
485 const void *msg, size_t length)
488 struct openflow_packet {
490 int (*handler)(struct sw_chain *, const struct sender *,
494 static const struct openflow_packet packets[] = {
495 [OFPT_FEATURES_REQUEST] = {
496 sizeof (struct ofp_header),
497 recv_features_request,
499 [OFPT_GET_CONFIG_REQUEST] = {
500 sizeof (struct ofp_header),
501 recv_get_config_request,
503 [OFPT_SET_CONFIG] = {
504 sizeof (struct ofp_switch_config),
507 [OFPT_PACKET_OUT] = {
508 sizeof (struct ofp_packet_out),
512 sizeof (struct ofp_flow_mod),
516 sizeof (struct ofp_port_mod),
519 [OFPT_ECHO_REQUEST] = {
520 sizeof (struct ofp_header),
523 [OFPT_ECHO_REPLY] = {
524 sizeof (struct ofp_header),
529 const struct openflow_packet *pkt;
530 struct ofp_header *oh;
532 oh = (struct ofp_header *) msg;
533 if (oh->version != OFP_VERSION || oh->type >= ARRAY_SIZE(packets)
534 || ntohs(oh->length) > length)
537 pkt = &packets[oh->type];
540 if (length < pkt->min_size)
543 return pkt->handler(chain, sender, msg);
546 /* Packet buffering. */
548 #define OVERWRITE_SECS 1
549 #define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)
551 struct packet_buffer {
554 unsigned long exp_jiffies;
557 static struct packet_buffer buffers[N_PKT_BUFFERS];
558 static unsigned int buffer_idx;
559 static DEFINE_SPINLOCK(buffer_lock);
	/* Park 'skb' in the ring of packet buffers and return an opaque id
	 * (slot index | cookie) for later retrieve_skb()/discard_skb().
	 * NOTE(review): failure return for a still-fresh slot is elided in
	 * this view — presumably the all-bits-1 "no buffer" id; confirm. */
561 uint32_t fwd_save_skb(struct sk_buff *skb)
563 	struct packet_buffer *p;
564 	unsigned long int flags;
567 	spin_lock_irqsave(&buffer_lock, flags);
568 	buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
569 	p = &buffers[buffer_idx];
571 	/* Don't buffer packet if existing entry is less than
572 	 * OVERWRITE_SECS old. */
573 	if (time_before(jiffies, p->exp_jiffies)) {
574 		spin_unlock_irqrestore(&buffer_lock, flags);
579 	/* Don't use maximum cookie value since the all-bits-1 id is
	 * reserved as "no buffer"; wrap the cookie before it reaches it. */
581 	if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
585 	p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
586 	id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
587 	spin_unlock_irqrestore(&buffer_lock, flags);
	/* Look up a buffered packet by the id fwd_save_skb() returned.
	 * Returns the skb (ownership transferred) on a cookie match, NULL
	 * otherwise. */
592 static struct sk_buff *retrieve_skb(uint32_t id)
594 	unsigned long int flags;
595 	struct sk_buff *skb = NULL;
596 	struct packet_buffer *p;
598 	spin_lock_irqsave(&buffer_lock, flags);
	/* Low bits select the slot; high bits must match its cookie, which
	 * guards against stale ids after the slot was reused. */
599 	p = &buffers[id & PKT_BUFFER_MASK];
600 	if (p->cookie == id >> PKT_BUFFER_BITS) {
604 		printk("cookie mismatch: %x != %x\n",
605 		       id >> PKT_BUFFER_BITS, p->cookie);
607 	spin_unlock_irqrestore(&buffer_lock, flags);
	/* Free every buffered packet, e.g. on shutdown.  kfree_skb() accepts
	 * NULL, so empty slots are safe to pass through. */
612 void fwd_discard_all(void)
614 	unsigned long int flags;
617 	spin_lock_irqsave(&buffer_lock, flags);
618 	for (i = 0; i < N_PKT_BUFFERS; i++) {
619 		kfree_skb(buffers[i].skb);
620 		buffers[i].skb = NULL;
622 	spin_unlock_irqrestore(&buffer_lock, flags);
	/* Drop the buffered packet identified by 'id', if the cookie still
	 * matches (i.e. the slot has not been reused since the id was made). */
625 static void discard_skb(uint32_t id)
627 	unsigned long int flags;
628 	struct packet_buffer *p;
630 	spin_lock_irqsave(&buffer_lock, flags);
631 	p = &buffers[id & PKT_BUFFER_MASK];
632 	if (p->cookie == id >> PKT_BUFFER_BITS) {
636 	spin_unlock_irqrestore(&buffer_lock, flags);
644 /* Utility functions. */
646 /* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
648 * Returns 1 if successful, 0 on failure. */
650 make_writable(struct sk_buff **pskb)
652 /* Based on skb_make_writable() in net/netfilter/core.c. */
653 struct sk_buff *nskb;
655 /* Not exclusive use of packet? Must copy. */
656 if (skb_shared(*pskb) || skb_cloned(*pskb))
659 return pskb_may_pull(*pskb, 40); /* FIXME? */
662 nskb = skb_copy(*pskb, GFP_ATOMIC);
665 BUG_ON(skb_is_nonlinear(nskb));
667 /* Rest of kernel will get very unhappy if we pass it a
668 suddenly-orphaned skbuff */
670 skb_set_owner_w(nskb, (*pskb)->sk);