datapath/forward.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <net/checksum.h>
#include "forward.h"
#include "datapath.h"
#include "chain.h"
#include "flow.h"

/* FIXME: do we need to use GFP_ATOMIC everywhere here? */

static int make_writable(struct sk_buff **);

static struct sk_buff *retrieve_skb(uint32_t id);
static void discard_skb(uint32_t id);

/* 'skb' was received on 'in_port', a physical switch port between 0 and
 * OFPP_MAX.  Process it according to 'chain'. */
void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb, int in_port)
{
        struct sw_flow_key key;
        struct sw_flow *flow;

        flow_extract(skb, in_port, &key);
        flow = chain_lookup(chain, &key);
        if (likely(flow != NULL)) {
                flow_used(flow, skb);
                execute_actions(chain->dp, skb, &key,
                                flow->actions, flow->n_actions);
        } else {
                dp_output_control(chain->dp, skb, fwd_save_skb(skb),
                                  ntohs(chain->dp->config.miss_send_len),
                                  OFPR_NO_MATCH);
        }
}
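
/*
 * Sketch of the two paths above (a summary, not extra logic): a hit charges
 * the matching flow for the packet and runs its cached actions locally; a
 * miss parks the packet with fwd_save_skb() and hands it to the controller,
 * with miss_send_len bounding how much of it the datapath forwards up.
 */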

static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
                        int out_port)
{
        if (!skb)
                return -ENOMEM;
        return (likely(out_port != OFPP_CONTROLLER)
                ? dp_output_port(dp, skb, out_port)
                : dp_output_control(dp, skb, fwd_save_skb(skb),
                                         max_len, OFPR_ACTION));
}

void execute_actions(struct datapath *dp, struct sk_buff *skb,
                                const struct sw_flow_key *key,
                                const struct ofp_action *actions, int n_actions)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so doing a clone and then
         * freeing the original skbuff would be wasteful.  The following code
         * is slightly obscure just to avoid that. */
        int prev_port;
        size_t max_len = 0;     /* Initialized only to make the compiler happy */
        uint16_t eth_proto;
        int i;

        prev_port = -1;
        eth_proto = ntohs(key->dl_type);

        for (i = 0; i < n_actions; i++) {
                const struct ofp_action *a = &actions[i];

                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC),
                                  max_len, prev_port);
                        prev_port = -1;
                }

                if (likely(a->type == htons(OFPAT_OUTPUT))) {
                        prev_port = ntohs(a->arg.output.port);
                        max_len = ntohs(a->arg.output.max_len);
                } else {
                        if (!make_writable(&skb)) {
                                printk("make_writable failed\n");
                                break;
                        }
                        skb = execute_setter(skb, eth_proto, key, a);
                }
        }
        if (prev_port != -1)
                do_output(dp, skb, max_len, prev_port);
        else
                kfree_skb(skb);
}
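
/*
 * Example of the deferred-output scheme above, assuming the action list
 * [output(1), output(2)]: the first iteration only records port 1 in
 * prev_port; the second emits a clone of 'skb' to port 1 and records port 2;
 * after the loop the original 'skb' goes to port 2 with no clone.  An action
 * list containing no output at all ends in kfree_skb().
 */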

/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
                        __be32 from, __be32 to, int pseudohdr)
{
        __be32 diff[] = { ~from, to };
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
                                ~csum_unfold(*sum)));
                if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                                ~skb->csum);
        } else if (pseudohdr)
                *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
                                csum_unfold(*sum)));
}
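
/*
 * The diff[] trick above is the standard incremental update (cf. RFC 1624):
 * a ones'-complement sum that covered 'from' can be corrected to cover 'to'
 * by folding in ~from + to, so nothing outside the changed field has to be
 * re-summed.
 */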

static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                struct iphdr *nh = ip_hdr(skb);
                uint32_t new, *field;

                new = a->arg.nw_addr;

                if (a->type == htons(OFPAT_SET_NW_SRC))
                        field = &nh->saddr;
                else
                        field = &nh->daddr;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                }
                update_csum(&nh->check, skb, *field, new, 0);
                *field = new;
        }
}

static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                uint16_t new, *field;

                new = a->arg.tp;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                }
        }
}

static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
        struct ethhdr *eh;

        /* Verify we were given a VLAN packet */
        if (vh->h_vlan_proto != htons(ETH_P_8021Q))
                return skb;

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}
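
/*
 * Byte-layout sketch for the untagging above: the 12 bytes of destination
 * and source MAC are slid VLAN_HLEN (4) bytes toward the payload, writing
 * over the 802.1Q TPID/TCI, and skb_pull() then drops the now-stale leading
 * 4 bytes, so the encapsulated EtherType becomes the ordinary h_proto.
 */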

static struct sk_buff *modify_vlan(struct sk_buff *skb,
                const struct sw_flow_key *key, const struct ofp_action *a)
{
        uint16_t new_id = a->arg.vlan_id;

        if (new_id != OFP_VLAN_NONE) {
                if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
                        /* Modify the VLAN id, but maintain the other TCI values */
                        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
                        vh->h_vlan_TCI = (vh->h_vlan_TCI
                                        & ~(htons(VLAN_VID_MASK))) | htons(new_id);
                } else {
                        /* Add a VLAN header */
                        skb = vlan_put_tag(skb, new_id);
                }
        } else {
                /* Remove the VLAN header if there is one */
                vlan_pull_tag(skb);
        }

        return skb;
}

struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
                        const struct sw_flow_key *key, const struct ofp_action *a)
{
        switch (ntohs(a->type)) {
        case OFPAT_SET_DL_VLAN:
                skb = modify_vlan(skb, key, a);
                break;

        case OFPAT_SET_DL_SRC: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
                break;
        }
        case OFPAT_SET_DL_DST: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
                break;
        }

        case OFPAT_SET_NW_SRC:
        case OFPAT_SET_NW_DST:
                modify_nh(skb, eth_proto, key->nw_proto, a);
                break;

        case OFPAT_SET_TP_SRC:
        case OFPAT_SET_TP_DST:
                modify_th(skb, eth_proto, key->nw_proto, a);
                break;

        default:
                if (net_ratelimit())
                        printk("execute_setter: unknown action: %d\n", ntohs(a->type));
        }

        return skb;
}

static int
recv_features_request(struct sw_chain *chain, const struct sender *sender,
                      const void *msg)
{
        return dp_send_features_reply(chain->dp, sender);
}

static int
recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
                        const void *msg)
{
        return dp_send_config_reply(chain->dp, sender);
}

static int
recv_set_config(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_switch_config *osc = msg;
        chain->dp->config = *osc;
        return 0;
}

static int
recv_packet_out(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_packet_out *opo = msg;
        struct sk_buff *skb;
        struct vlan_ethhdr *mac;
        int nh_ofs;

        if (ntohl(opo->buffer_id) == (uint32_t) -1) {
                int data_len = ntohs(opo->header.length) - sizeof *opo;

                /* FIXME: there is likely a way to reuse the data in msg. */
                skb = alloc_skb(data_len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                /* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
                 * we're just transmitting this raw without examining anything
                 * at those layers. */
                memcpy(skb_put(skb, data_len), opo->u.data, data_len);
                dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

                skb_set_mac_header(skb, 0);
                mac = vlan_eth_hdr(skb);
                if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
                        nh_ofs = sizeof(struct ethhdr);
                else
                        nh_ofs = sizeof(struct vlan_ethhdr);
                skb_set_network_header(skb, nh_ofs);

                dp_output_port(chain->dp, skb, ntohs(opo->out_port));
        } else {
                struct sw_flow_key key;
                int n_acts;

                skb = retrieve_skb(ntohl(opo->buffer_id));
                if (!skb)
                        return -ESRCH;
                dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

                n_acts = (ntohs(opo->header.length) - sizeof *opo)
                                / sizeof *opo->u.actions;
                flow_extract(skb, ntohs(opo->in_port), &key);
                execute_actions(chain->dp, skb, &key, opo->u.actions, n_acts);
        }
        return 0;
}
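
/*
 * Two cases above: a buffer_id of all ones means the controller shipped the
 * raw frame inline in the packet-out message, so a fresh skb is built from
 * that data and sent straight out the named port; any other buffer_id names
 * a packet parked earlier by fwd_save_skb(), which is pulled back out and
 * run through the actions carried in the message.
 */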

static int
recv_port_mod(struct sw_chain *chain, const struct sender *sender,
              const void *msg)
{
        const struct ofp_port_mod *opm = msg;

        dp_update_port_flags(chain->dp, &opm->desc);

        return 0;
}

static int
add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
{
        int error = -ENOMEM;
        int i;
        int n_acts;
        struct sw_flow *flow;

        /* Check number of actions. */
        n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
        if (n_acts > MAX_ACTIONS) {
                error = -E2BIG;
                goto error;
        }

        /* To prevent loops, make sure there's no action to send to the
         * OFP_TABLE virtual port.
         */
        for (i = 0; i < n_acts; i++) {
                const struct ofp_action *a = &ofm->actions[i];

                if (a->type == htons(OFPAT_OUTPUT)
                                        && a->arg.output.port == htons(OFPP_TABLE)) {
                        /* xxx Send fancy new error message? */
                        goto error;
                }
        }

        /* Allocate memory. */
        flow = flow_alloc(n_acts, GFP_ATOMIC);
        if (flow == NULL)
                goto error;

        /* Fill out flow. */
        flow_extract_match(&flow->key, &ofm->match);
        flow->group_id = ntohl(ofm->group_id);
        flow->max_idle = ntohs(ofm->max_idle);
        flow->timeout = jiffies + flow->max_idle * HZ;
        flow->n_actions = n_acts;
        flow->init_time = jiffies;
        flow->byte_count = 0;
        flow->packet_count = 0;
        atomic_set(&flow->deleted, 0);
        spin_lock_init(&flow->lock);
        memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);

        /* Act. */
        error = chain_insert(chain, flow);
        if (error)
                goto error_free_flow;
        error = 0;
        if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
                struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
                if (skb) {
                        struct sw_flow_key key;
                        flow_used(flow, skb);
                        flow_extract(skb, ntohs(ofm->match.in_port), &key);
                        execute_actions(chain->dp, skb, &key,
                                        ofm->actions, n_acts);
                } else
                        error = -ESRCH;
        }
        return error;

error_free_flow:
        flow_free(flow);
error:
        if (ntohl(ofm->buffer_id) != (uint32_t) -1)
                discard_skb(ntohl(ofm->buffer_id));
        return error;
}
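
/*
 * Note on the buffer_id handling above: when the flow-mod names a buffered
 * packet, that packet is immediately accounted to the new flow and run
 * through its actions; on any failure path the buffered packet is dropped
 * via discard_skb() so its slot is not left holding a stale skb.
 */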

static int
recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
{
        const struct ofp_flow_mod *ofm = msg;
        uint16_t command = ntohs(ofm->command);

        if (command == OFPFC_ADD) {
                return add_flow(chain, ofm);
        } else if (command == OFPFC_DELETE) {
                struct sw_flow_key key;
                flow_extract_match(&key, &ofm->match);
                return chain_delete(chain, &key, 0) ? 0 : -ESRCH;
        } else if (command == OFPFC_DELETE_STRICT) {
                struct sw_flow_key key;
                flow_extract_match(&key, &ofm->match);
                return chain_delete(chain, &key, 1) ? 0 : -ESRCH;
        } else {
                return -ENOTSUPP;
        }
}

static int
recv_flow_status_request(struct sw_chain *chain, const struct sender *sender,
                         const void *msg)
{
        const struct ofp_flow_stat_request *fsr = msg;
        if (fsr->type == OFPFS_INDIV) {
                return dp_send_flow_stats(chain->dp, sender, &fsr->match);
        } else {
                /* FIXME */
                return -ENOTSUPP;
        }
}

static int
recv_port_status_request(struct sw_chain *chain, const struct sender *sender,
                         const void *msg)
{
        return dp_send_port_stats(chain->dp, sender);
}

static int
recv_table_status_request(struct sw_chain *chain, const struct sender *sender,
                          const void *msg)
{
        return dp_send_table_stats(chain->dp, sender);
}

/* 'msg', which is 'length' bytes long, was received across Netlink from
 * 'sender'.  Apply it to 'chain'. */
int
fwd_control_input(struct sw_chain *chain, const struct sender *sender,
                  const void *msg, size_t length)
{
        struct openflow_packet {
                size_t min_size;
                int (*handler)(struct sw_chain *, const struct sender *,
                               const void *);
        };

        static const struct openflow_packet packets[] = {
                [OFPT_FEATURES_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_features_request,
                },
                [OFPT_GET_CONFIG_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_get_config_request,
                },
                [OFPT_SET_CONFIG] = {
                        sizeof (struct ofp_switch_config),
                        recv_set_config,
                },
                [OFPT_PACKET_OUT] = {
                        sizeof (struct ofp_packet_out),
                        recv_packet_out,
                },
                [OFPT_FLOW_MOD] = {
                        sizeof (struct ofp_flow_mod),
                        recv_flow,
                },
                [OFPT_PORT_MOD] = {
                        sizeof (struct ofp_port_mod),
                        recv_port_mod,
                },
                [OFPT_FLOW_STAT_REQUEST] = {
                        sizeof (struct ofp_flow_stat_request),
                        recv_flow_status_request,
                },
                [OFPT_PORT_STAT_REQUEST] = {
                        sizeof (struct ofp_port_stat_request),
                        recv_port_status_request,
                },
                [OFPT_TABLE_STAT_REQUEST] = {
                        sizeof (struct ofp_table_stat_request),
                        recv_table_status_request,
                },
        };

        const struct openflow_packet *pkt;
        struct ofp_header *oh;

        oh = (struct ofp_header *) msg;
        if (oh->version != 1 || oh->type >= ARRAY_SIZE(packets)
                || ntohs(oh->length) > length)
                return -EINVAL;

        pkt = &packets[oh->type];
        if (!pkt->handler)
                return -ENOSYS;
        if (length < pkt->min_size)
                return -EFAULT;

        return pkt->handler(chain, sender, msg);
}
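
/*
 * Dispatch summary (a restatement, not new checks): the header is validated
 * for version, a known type, and a declared length no larger than what was
 * actually received; the per-type table then enforces a minimum message size
 * before the handler runs.  Gaps in the designated-initializer table fall
 * out as NULL handlers and return -ENOSYS.
 */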

/* Packet buffering. */

#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

struct packet_buffer {
        struct sk_buff *skb;
        uint32_t cookie;
        unsigned long exp_jiffies;
};

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;
static DEFINE_SPINLOCK(buffer_lock);
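
/*
 * A buffer id packs the ring index into its low PKT_BUFFER_BITS bits and the
 * slot's cookie above them (the constants themselves live in the headers, so
 * the widths here are only illustrative):
 *
 *     id   = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
 *     slot = id & PKT_BUFFER_MASK;
 *     ok   = (id >> PKT_BUFFER_BITS) == buffers[slot].cookie;
 *
 * The all-ones id is reserved to mean "no buffered packet".
 */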

uint32_t fwd_save_skb(struct sk_buff *skb)
{
        struct packet_buffer *p;
        unsigned long int flags;
        uint32_t id;

        spin_lock_irqsave(&buffer_lock, flags);
        buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
        p = &buffers[buffer_idx];
        if (p->skb) {
                /* Don't buffer packet if existing entry is less than
                 * OVERWRITE_SECS old. */
                if (time_before(jiffies, p->exp_jiffies)) {
                        spin_unlock_irqrestore(&buffer_lock, flags);
                        return -1;
                } else
                        kfree_skb(p->skb);
        }
        /* Don't use maximum cookie value since the all-bits-1 id is
         * special. */
        if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
                p->cookie = 0;
        skb_get(skb);
        p->skb = skb;
        p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
        id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
        spin_unlock_irqrestore(&buffer_lock, flags);

        return id;
}

static struct sk_buff *retrieve_skb(uint32_t id)
{
        unsigned long int flags;
        struct sk_buff *skb = NULL;
        struct packet_buffer *p;

        spin_lock_irqsave(&buffer_lock, flags);
        p = &buffers[id & PKT_BUFFER_MASK];
        if (p->cookie == id >> PKT_BUFFER_BITS) {
                skb = p->skb;
                p->skb = NULL;
        } else {
                printk("cookie mismatch: %x != %x\n",
                                id >> PKT_BUFFER_BITS, p->cookie);
        }
        spin_unlock_irqrestore(&buffer_lock, flags);

        return skb;
}

static void discard_skb(uint32_t id)
{
        unsigned long int flags;
        struct packet_buffer *p;

        spin_lock_irqsave(&buffer_lock, flags);
        p = &buffers[id & PKT_BUFFER_MASK];
        if (p->cookie == id >> PKT_BUFFER_BITS) {
                kfree_skb(p->skb);
                p->skb = NULL;
        }
        spin_unlock_irqrestore(&buffer_lock, flags);
}

void fwd_exit(void)
{
        int i;

        for (i = 0; i < N_PKT_BUFFERS; i++)
                kfree_skb(buffers[i].skb);
}

/* Utility functions. */

/* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
 * the copy.
 * Returns 1 if successful, 0 on failure. */
static int
make_writable(struct sk_buff **pskb)
{
        /* Based on skb_make_writable() in net/netfilter/core.c. */
        struct sk_buff *nskb;

        /* Not exclusive use of packet?  Must copy. */
        if (skb_shared(*pskb) || skb_cloned(*pskb))
                goto copy_skb;

        return pskb_may_pull(*pskb, 64); /* FIXME? */

copy_skb:
        nskb = skb_copy(*pskb, GFP_ATOMIC);
        if (!nskb)
                return 0;
        BUG_ON(skb_is_nonlinear(nskb));

        /* Rest of kernel will get very unhappy if we pass it a
         * suddenly-orphaned skbuff. */
        if ((*pskb)->sk)
                skb_set_owner_w(nskb, (*pskb)->sk);
        kfree_skb(*pskb);
        *pskb = nskb;
        return 1;
}
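
/*
 * Rationale note: a shared or cloned skb may have its data referenced
 * elsewhere, so the setters above must work on a private copy before they
 * write headers.  The pskb_may_pull(*pskb, 64) in the non-copy path only
 * guarantees that the leading bytes are linear; 64 is presumably intended to
 * cover the Ethernet/IP/TCP headers the setters touch (hence the FIXME).
 */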