2 * Distributed under the terms of the GNU GPL version 2.
3 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
4 * Stanford Junior University
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
11 #include <linux/tcp.h>
12 #include <linux/udp.h>
13 #include <linux/in6.h>
14 #include <asm/uaccess.h>
15 #include <linux/types.h>
16 #include <net/checksum.h>
22 /* FIXME: do we need to use GFP_ATOMIC everywhere here? */
24 static void execute_actions(struct datapath *, struct sk_buff *,
25 const struct sw_flow_key *,
26 const struct ofp_action *, int n_actions);
27 static int make_writable(struct sk_buff **);
29 static struct sk_buff *retrieve_skb(uint32_t id);
30 static void discard_skb(uint32_t id);
32 /* 'skb' was received on 'in_port', a physical switch port between 0 and
33 * OFPP_MAX. Process it according to 'chain'. */
34 void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb, int in_port)
36 struct sw_flow_key key;
/* Build a flow key from the packet headers, then look it up in the
 * flow table chain. */
39 flow_extract(skb, in_port, &key);
40 flow = chain_lookup(chain, &key);
41 if (likely(flow != NULL)) {
/* Table hit: run the flow's action list against the packet. */
43 execute_actions(chain->dp, skb, &key,
44 flow->actions, flow->n_actions);
/* Table miss: buffer the packet (fwd_save_skb returns a buffer id)
 * and send up to miss_send_len bytes of it to the controller. */
46 dp_output_control(chain->dp, skb, fwd_save_skb(skb),
47 chain->dp->miss_send_len, OFPR_NO_MATCH);
/* Emit 'skb' on 'out_port'.  The common case is a plain transmit via
 * dp_output_port(); the special OFPP_CONTROLLER port instead buffers the
 * packet and forwards at most 'max_len' bytes of it to the controller,
 * tagged with reason OFPR_ACTION. */
51 static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
56 return (likely(out_port != OFPP_CONTROLLER)
57 ? dp_output_port(dp, skb, out_port)
58 : dp_output_control(dp, skb, fwd_save_skb(skb),
59 max_len, OFPR_ACTION));
/* Applies the 'n_actions' actions in 'actions' to 'skb', which was
 * classified as 'key'.  Output actions are deferred by one iteration
 * (tracked in 'prev_port') so that only the last output consumes 'skb'
 * itself; earlier outputs get clones. */
62 static void execute_actions(struct datapath *dp, struct sk_buff *skb,
63 const struct sw_flow_key *key,
64 const struct ofp_action *actions, int n_actions)
66 /* Every output action needs a separate clone of 'skb', but the common
67 * case is just a single output action, so that doing a clone and
68 * then freeing the original skbuff is wasteful. So the following code
69 * is slightly obscure just to avoid that. */
71 size_t max_len=0; /* Initialize to make compiler happy */
76 eth_proto = ntohs(key->dl_type);
78 for (i = 0; i < n_actions; i++) {
79 const struct ofp_action *a = &actions[i];
/* Flush the output deferred from the previous iteration on a clone,
 * since the original skb is still needed for the remaining actions. */
81 if (prev_port != -1) {
82 do_output(dp, skb_clone(skb, GFP_ATOMIC),
/* OFPAT_OUTPUT just records where to send; the send happens on the
 * next iteration (clone) or after the loop (original skb). */
87 if (likely(a->type == ntohs(OFPAT_OUTPUT))) {
88 prev_port = ntohs(a->arg.output.port);
89 max_len = ntohs(a->arg.output.max_len);
/* Field-setting actions modify packet data, so the skb must be
 * exclusively owned (unshared, linear) first. */
91 if (!make_writable(&skb)) {
92 printk("make_writable failed\n");
95 skb = execute_setter(skb, eth_proto, key, a);
/* Final deferred output consumes the original skb. */
99 do_output(dp, skb, max_len, prev_port);
104 /* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
105 * covered by the sum has been changed from 'from' to 'to'. If set,
106 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
107 * Based on nf_proto_csum_replace4. */
108 static void update_csum(__sum16 *sum, struct sk_buff *skb,
109 __be32 from, __be32 to, int pseudohdr)
/* Incremental update: folding {~from, to} into the old sum yields the
 * checksum over the new data without re-summing the whole packet. */
111 __be32 diff[] = { ~from, to };
112 if (skb->ip_summed != CHECKSUM_PARTIAL) {
113 *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
114 ~csum_unfold(*sum)));
/* CHECKSUM_COMPLETE carries the full packet sum in skb->csum; keep it
 * consistent with the pseudo-header change as well. */
115 if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
116 skb->csum = ~csum_partial((char *)diff, sizeof(diff),
/* CHECKSUM_PARTIAL: hardware finishes the sum later, so only the
 * pseudo-header portion stored in the field needs adjusting. */
118 } else if (pseudohdr)
119 *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
/* Implements OFPAT_SET_NW_SRC/OFPAT_SET_NW_DST: rewrites the IPv4 source
 * or destination address of 'skb' and incrementally fixes the IP header
 * checksum plus, for TCP/UDP, the transport checksum (which covers the
 * addresses via the pseudo-header).  Non-IP packets are left untouched. */
123 static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
124 uint8_t nw_proto, const struct ofp_action *a)
126 if (eth_proto == ETH_P_IP) {
127 struct iphdr *nh = ip_hdr(skb);
128 uint32_t new, *field;
130 new = a->arg.nw_addr;
132 if (a->type == OFPAT_SET_NW_SRC)
/* TCP and UDP checksums include the IP addresses (pseudo-header),
 * so they must be updated alongside the IP header checksum. */
137 if (nw_proto == IPPROTO_TCP) {
138 struct tcphdr *th = tcp_hdr(skb);
139 update_csum(&th->check, skb, *field, new, 1);
140 } else if (nw_proto == IPPROTO_UDP) {
141 struct udphdr *th = udp_hdr(skb);
142 update_csum(&th->check, skb, *field, new, 1);
/* IP header checksum does not involve the pseudo-header. */
144 update_csum(&nh->check, skb, *field, new, 0);
/* Implements OFPAT_SET_TP_SRC/OFPAT_SET_TP_DST: rewrites the TCP or UDP
 * source or destination port of an IPv4 packet and incrementally updates
 * the transport checksum.  Non-IP or non-TCP/UDP packets are untouched. */
149 static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
150 uint8_t nw_proto, const struct ofp_action *a)
152 if (eth_proto == ETH_P_IP) {
153 uint16_t new, *field;
157 if (nw_proto == IPPROTO_TCP) {
158 struct tcphdr *th = tcp_hdr(skb);
160 if (a->type == OFPAT_SET_TP_SRC)
165 update_csum(&th->check, skb, *field, new, 1);
167 } else if (nw_proto == IPPROTO_UDP) {
/* UDP path mirrors TCP: pick src/dst field, then fix the checksum. */
168 struct udphdr *th = udp_hdr(skb);
170 if (a->type == OFPAT_SET_TP_SRC)
175 update_csum(&th->check, skb, *field, new, 1);
/* Strips the 802.1Q tag from 'skb' in place: shifts the MAC addresses up
 * over the 4-byte VLAN header, pulls the header, and restores the inner
 * ethertype as skb->protocol.  Requires that 'skb' actually carries a
 * VLAN tag (checked below). */
181 static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
183 struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
187 /* Verify we were given a vlan packet */
188 if (vh->h_vlan_proto != __constant_htons(ETH_P_8021Q))
/* Slide dst+src MAC (2 * ETH_ALEN bytes) up by VLAN_HLEN, overwriting
 * the tag; regions overlap, hence memmove. */
191 memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);
193 eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
195 skb->protocol = eh->h_proto;
196 skb->mac_header += VLAN_HLEN;
/* Implements OFPAT_SET_DL_VLAN: sets, rewrites, or (for OFP_VLAN_NONE)
 * strips the 802.1Q tag on 'skb' based on whether the packet already
 * carries one (per 'key->dl_vlan').  May replace 'skb' when a tag has to
 * be inserted, so callers must use the returned pointer. */
201 static struct sk_buff *modify_vlan(struct sk_buff *skb,
202 const struct sw_flow_key *key, const struct ofp_action *a)
204 uint16_t new_id = a->arg.vlan_id;
206 if (new_id != OFP_VLAN_NONE) {
207 if (key->dl_vlan != __constant_htons(OFP_VLAN_NONE)) {
208 /* Modify vlan id, but maintain other TCI values */
209 struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
210 vh->h_vlan_TCI = (vh->h_vlan_TCI
211 & ~(__constant_htons(VLAN_VID_MASK))) | htons(new_id);
213 /* Add vlan header */
214 skb = vlan_put_tag(skb, new_id);
217 /* Remove an existing vlan header if it exists */
/* Applies a single non-output (field-setting) action 'a' to 'skb',
 * dispatching on the action type.  May replace 'skb' (VLAN insertion),
 * so callers must use the returned pointer.  'key' supplies the packet's
 * classification (VLAN presence, IP protocol) needed by the helpers. */
224 struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
225 const struct sw_flow_key *key, const struct ofp_action *a)
228 case OFPAT_SET_DL_VLAN:
229 skb = modify_vlan(skb, key, a);
/* Ethernet source/destination rewrites are plain byte copies; the
 * Ethernet layer has no checksum to fix up. */
232 case OFPAT_SET_DL_SRC: {
233 struct ethhdr *eh = eth_hdr(skb);
234 memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
237 case OFPAT_SET_DL_DST: {
238 struct ethhdr *eh = eth_hdr(skb);
239 memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
/* IP address and transport-port rewrites need checksum maintenance;
 * delegated to modify_nh()/modify_th(). */
243 case OFPAT_SET_NW_SRC:
244 case OFPAT_SET_NW_DST:
245 modify_nh(skb, eth_proto, key->nw_proto, a);
248 case OFPAT_SET_TP_SRC:
249 case OFPAT_SET_TP_DST:
250 modify_th(skb, eth_proto, key->nw_proto, a);
/* Handler for OFPT_CONTROL_HELLO messages from the controller: records
 * the requested miss_send_len (unless "unchanged" is requested) and the
 * hello flags on the datapath, then replies with our own hello. */
261 recv_control_hello(struct sw_chain *chain, const void *msg)
263 const struct ofp_control_hello *och = msg;
265 printk("control_hello(version=%d)\n", ntohl(och->version));
267 if (ntohs(och->miss_send_len) != OFP_MISS_SEND_LEN_UNCHANGED) {
268 chain->dp->miss_send_len = ntohs(och->miss_send_len);
271 chain->dp->hello_flags = ntohs(och->flags);
273 dp_send_hello(chain->dp);
/* Handler for OFPT_PACKET_OUT messages.  Two cases: buffer_id == -1 means
 * the packet data is carried inline in the message and is copied into a
 * fresh skb and transmitted; otherwise the id names a previously buffered
 * skb, which is retrieved and has the message's action list applied. */
279 recv_packet_out(struct sw_chain *chain, const void *msg)
281 const struct ofp_packet_out *opo = msg;
283 struct vlan_ethhdr *mac;
286 if (ntohl(opo->buffer_id) == (uint32_t) -1) {
/* Inline-data case: payload length is whatever follows the fixed
 * ofp_packet_out header. */
287 int data_len = ntohs(opo->header.length) - sizeof *opo;
289 /* FIXME: there is likely a way to reuse the data in msg. */
290 skb = alloc_skb(data_len, GFP_ATOMIC);
294 /* FIXME? We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
295 * we're just transmitting this raw without examining anything
296 * at those layers. */
297 memcpy(skb_put(skb, data_len), opo->u.data, data_len);
298 dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
/* Set MAC/network header offsets so lower layers can parse the
 * frame; network header starts after the (possibly VLAN) ethernet
 * header. */
300 skb_set_mac_header(skb, 0);
301 mac = vlan_eth_hdr(skb);
302 if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
303 nh_ofs = sizeof(struct ethhdr);
305 nh_ofs = sizeof(struct vlan_ethhdr);
306 skb_set_network_header(skb, nh_ofs);
308 dp_output_port(chain->dp, skb, ntohs(opo->out_port));
/* Buffered case: fetch the saved skb by id and run the actions
 * appended to the message against it. */
310 struct sw_flow_key key;
313 skb = retrieve_skb(ntohl(opo->buffer_id));
316 dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
/* Action count = bytes after the fixed header / bytes per action. */
318 n_acts = (ntohs(opo->header.length) - sizeof *opo)
319 / sizeof *opo->u.actions;
320 flow_extract(skb, ntohs(opo->in_port), &key);
321 execute_actions(chain->dp, skb, &key, opo->u.actions, n_acts);
/* Handler for OFPT_PORT_MOD messages: forwards the embedded port
 * description to the datapath so it can update that port's flags. */
327 recv_port_mod(struct sw_chain *chain, const void *msg)
329 const struct ofp_port_mod *opm = msg;
331 dp_update_port_flags(chain->dp, &opm->desc);
/* Implements OFPFC_ADD for recv_flow(): validates the action count,
 * allocates and fills in a new sw_flow from the flow_mod message, inserts
 * it into 'chain', and — if the message references a buffered packet —
 * immediately runs the new flow's actions on that packet.  On insertion
 * failure the flow is freed and any referenced buffer is discarded. */
337 add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
341 struct sw_flow *flow;
344 /* Check number of actions. */
345 n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
346 if (n_acts > MAX_ACTIONS) {
351 /* Allocate memory. */
352 flow = flow_alloc(n_acts, GFP_ATOMIC);
/* Fill out flow: match, identity, idle-timeout deadline (max_idle is
 * in seconds, jiffies + max_idle * HZ), zeroed stats. */
357 flow_extract_match(&flow->key, &ofm->match);
358 flow->group_id = ntohl(ofm->group_id);
359 flow->max_idle = ntohs(ofm->max_idle);
360 flow->timeout = jiffies + flow->max_idle * HZ;
361 flow->n_actions = n_acts;
362 flow->init_time = jiffies;
363 flow->byte_count = 0;
364 flow->packet_count = 0;
365 atomic_set(&flow->deleted, 0);
366 spin_lock_init(&flow->lock);
367 memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);
370 error = chain_insert(chain, flow);
372 goto error_free_flow;
/* If the controller attached a buffered packet, process it through the
 * newly installed flow right away. */
374 if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
375 struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
377 struct sw_flow_key key;
378 flow_used(flow, skb);
379 flow_extract(skb, ntohs(ofm->match.in_port), &key);
380 execute_actions(chain->dp, skb, &key,
381 ofm->actions, n_acts);
/* Error path: drop any buffered packet the message referenced so the
 * buffer slot is not left occupied. */
391 if (ntohl(ofm->buffer_id) != (uint32_t) -1)
392 discard_skb(ntohl(ofm->buffer_id));
/* Handler for OFPT_FLOW_MOD messages: dispatches on the command field to
 * add a flow, or delete matching flows (wildcard or strict match).
 * Returns -ESRCH when a delete matches nothing. */
397 recv_flow(struct sw_chain *chain, const void *msg)
399 const struct ofp_flow_mod *ofm = msg;
400 uint16_t command = ntohs(ofm->command);
402 if (command == OFPFC_ADD) {
403 return add_flow(chain, ofm);
404 } else if (command == OFPFC_DELETE) {
/* Wildcard delete: chain_delete with strict=0. */
405 struct sw_flow_key key;
406 flow_extract_match(&key, &ofm->match);
407 return chain_delete(chain, &key, 0) ? 0 : -ESRCH;
408 } else if (command == OFPFC_DELETE_STRICT) {
/* Strict delete: chain_delete with strict=1 (exact match only). */
409 struct sw_flow_key key;
410 flow_extract_match(&key, &ofm->match);
411 return chain_delete(chain, &key, 1) ? 0 : -ESRCH;
417 /* 'msg', which is 'length' bytes long, was received from the control path.
418 * Apply it to 'chain'. */
420 fwd_control_input(struct sw_chain *chain, const void *msg, size_t length)
/* Dispatch table entry: minimum valid message size + handler. */
423 struct openflow_packet {
425 int (*handler)(struct sw_chain *, const void *);
/* Table is indexed by the OFPT_* message type from the header. */
428 static const struct openflow_packet packets[] = {
429 [OFPT_CONTROL_HELLO] = {
430 sizeof (struct ofp_control_hello),
433 [OFPT_PACKET_OUT] = {
434 sizeof (struct ofp_packet_out),
438 sizeof (struct ofp_flow_mod),
442 sizeof (struct ofp_port_mod),
447 const struct openflow_packet *pkt;
448 struct ofp_header *oh;
/* Validate the common header before trusting type/length fields. */
450 if (length < sizeof(struct ofp_header))
453 oh = (struct ofp_header *) msg;
454 if (oh->version != 1 || oh->type >= ARRAY_SIZE(packets)
455 || ntohs(oh->length) > length)
458 pkt = &packets[oh->type];
/* Reject messages shorter than the handler's fixed-size struct. */
461 if (length < pkt->min_size)
464 return pkt->handler(chain, msg);
467 /* Packet buffering. */
/* A buffered entry may be overwritten once it is at least this old. */
469 #define OVERWRITE_SECS 1
470 #define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)
472 struct packet_buffer {
475 unsigned long exp_jiffies;
/* Fixed-size ring of buffered packets awaiting controller decisions;
 * buffer_idx is the most recently used slot.  All access is serialized
 * by buffer_lock. */
478 static struct packet_buffer buffers[N_PKT_BUFFERS];
479 static unsigned int buffer_idx;
480 static DEFINE_SPINLOCK(buffer_lock);
/* Stores 'skb' in the packet buffer ring and returns an opaque 32-bit id
 * encoding the slot index (low PKT_BUFFER_BITS bits) and a per-slot
 * cookie (upper bits) that guards against stale ids.  Fails (without
 * buffering) if the next slot's entry is still younger than
 * OVERWRITE_SECS. */
482 uint32_t fwd_save_skb(struct sk_buff *skb)
484 struct packet_buffer *p;
485 unsigned long int flags;
488 spin_lock_irqsave(&buffer_lock, flags);
489 buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
490 p = &buffers[buffer_idx];
492 /* Don't buffer packet if existing entry is less than
493 * OVERWRITE_SECS old. */
494 if (time_before(jiffies, p->exp_jiffies)) {
495 spin_unlock_irqrestore(&buffer_lock, flags);
500 /* Don't use maximum cookie value since the all-bits-1 id is
/* Cookie wraps below its maximum so that id (uint32_t)-1, which means
 * "no buffer", can never be produced. */
502 if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
506 p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
507 id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
508 spin_unlock_irqrestore(&buffer_lock, flags);
/* Looks up a buffered skb by the id returned from fwd_save_skb().  The
 * slot is found via the low index bits; the stored cookie must match the
 * id's upper bits, otherwise the id is stale and NULL is returned. */
513 static struct sk_buff *retrieve_skb(uint32_t id)
515 unsigned long int flags;
516 struct sk_buff *skb = NULL;
517 struct packet_buffer *p;
519 spin_lock_irqsave(&buffer_lock, flags);
520 p = &buffers[id & PKT_BUFFER_MASK];
521 if (p->cookie == id >> PKT_BUFFER_BITS) {
/* Stale or forged id: log the mismatch, return NULL. */
525 printk("cookie mismatch: %x != %x\n",
526 id >> PKT_BUFFER_BITS, p->cookie);
528 spin_unlock_irqrestore(&buffer_lock, flags);
/* Drops the buffered skb identified by 'id' (same slot + cookie scheme
 * as retrieve_skb), freeing the slot for reuse.  A stale id is silently
 * ignored. */
533 static void discard_skb(uint32_t id)
535 unsigned long int flags;
536 struct packet_buffer *p;
538 spin_lock_irqsave(&buffer_lock, flags);
539 p = &buffers[id & PKT_BUFFER_MASK];
540 if (p->cookie == id >> PKT_BUFFER_BITS) {
544 spin_unlock_irqrestore(&buffer_lock, flags);
/* NOTE(review): body of what appears to be the module teardown routine
 * (function header not visible in this listing — confirm).  Frees every
 * buffered skb; kfree_skb(NULL) is a no-op, so empty slots are safe. */
551 for (i = 0; i < N_PKT_BUFFERS; i++)
552 kfree_skb(buffers[i].skb);
555 /* Utility functions. */
557 /* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
559 * Returns 1 if successful, 0 on failure. */
561 make_writable(struct sk_buff **pskb)
563 /* Based on skb_make_writable() in net/netfilter/core.c. */
564 struct sk_buff *nskb;
566 /* Not exclusive use of packet? Must copy. */
567 if (skb_shared(*pskb) || skb_cloned(*pskb))
/* Exclusively owned: just ensure the headers we edit are in the
 * linear area (64 bytes covers eth + IP + TCP/UDP headers). */
570 return pskb_may_pull(*pskb, 64); /* FIXME? */
/* Shared/cloned path: take a private linear copy. */
573 nskb = skb_copy(*pskb, GFP_ATOMIC);
576 BUG_ON(skb_is_nonlinear(nskb));
578 /* Rest of kernel will get very unhappy if we pass it a
579 suddenly-orphaned skbuff */
581 skb_set_owner_w(nskb, (*pskb)->sk);