/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)

#include <linux/list.h>
#include <linux/net.h>

#include <net/inet_frag.h>
#include <net/protocol.h>

#include "vport-generic.h"

#define CAPWAP_SRC_PORT 58881
#define CAPWAP_DST_PORT 58882

#define CAPWAP_FRAG_TIMEOUT (30 * HZ)
#define CAPWAP_FRAG_MAX_MEM (256 * 1024)
#define CAPWAP_FRAG_PRUNE_MEM (192 * 1024)
#define CAPWAP_FRAG_SECRET_INTERVAL (10 * 60 * HZ)
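/*
 * Note on the two memory thresholds (summarising the defrag() path below):
 * once queued fragment memory exceeds CAPWAP_FRAG_MAX_MEM, defrag() kicks
 * inet_frag_evictor(), which prunes old reassembly queues back down toward
 * CAPWAP_FRAG_PRUNE_MEM, the low threshold.
 */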
/*
 * The CAPWAP header is a mess, with all kinds of odd size bit fields that
 * cross byte boundaries, which are difficult to represent correctly in
 * various byte orderings.  Luckily we only care about a few permutations, so
 * statically create them and we can do very fast parsing by checking all 12
 * relevant bits at once.
 */
#define CAPWAP_BEGIN_HLEN __cpu_to_be32(0x00100000)
#define CAPWAP_BEGIN_WBID __cpu_to_be32(0x00000200)
#define CAPWAP_BEGIN_FRAG __cpu_to_be32(0x00000080)
#define CAPWAP_BEGIN_LAST __cpu_to_be32(0x00000040)

#define NO_FRAG_HDR (CAPWAP_BEGIN_HLEN | CAPWAP_BEGIN_WBID)
#define FRAG_HDR (NO_FRAG_HDR | CAPWAP_BEGIN_FRAG)
#define FRAG_LAST_HDR (FRAG_HDR | CAPWAP_BEGIN_LAST)
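/*
 * For reference (illustrative decoding; field layout per RFC 5415, with HLEN
 * counted in 4-byte words), the three accepted header words are, in network
 * byte order:
 *
 *   NO_FRAG_HDR   = 0x00100200  HLEN = 2 (8 byte header), WBID = 1
 *   FRAG_HDR      = 0x00100280  as above, plus the 'F' (fragment) bit
 *   FRAG_LAST_HDR = 0x001002c0  as above, plus the 'F' and 'L' (last) bits
 */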
static inline struct capwaphdr *capwap_hdr(const struct sk_buff *skb)
{
	return (struct capwaphdr *)(udp_hdr(skb) + 1);
}
/*
 * The fragment offset is actually the high 13 bits of the last 16 bit field,
 * so we would normally need to right shift 3 places.  However, it stores the
 * offset in 8 byte chunks, which would involve a 3 place left shift.  So we
 * just mask off the last 3 bits and be done with it (see the worked example
 * below).
 */
#define FRAG_OFF_MASK (~0x7U)
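/*
 * Worked example (illustrative): a frag_off field of 0x0028 encodes
 * 0x0028 >> 3 = 5 eight-byte units, i.e. a 40 byte offset; masking,
 * 0x0028 & FRAG_OFF_MASK = 40, yields the byte offset directly.
 */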
#define CAPWAP_HLEN (sizeof(struct udphdr) + sizeof(struct capwaphdr))
struct frag_queue {
	struct inet_frag_queue ifq;
	struct frag_match match;
};

#define FRAG_CB(skb) ((struct frag_skb_cb *)(skb)->cb)

static struct sk_buff *fragment(struct sk_buff *, const struct vport *,
				struct dst_entry *);
static void defrag_init(void);
static void defrag_exit(void);
static struct sk_buff *defrag(struct sk_buff *, bool frag_last);

static void capwap_frag_init(struct inet_frag_queue *, void *match);
static unsigned int capwap_frag_hash(struct inet_frag_queue *);
static int capwap_frag_match(struct inet_frag_queue *, void *match);
static void capwap_frag_expire(unsigned long ifq);
static struct inet_frags frag_state = {
	.constructor = capwap_frag_init,
	.qsize = sizeof(struct frag_queue),
	.hashfn = capwap_frag_hash,
	.match = capwap_frag_match,
	.frag_expire = capwap_frag_expire,
	.secret_interval = CAPWAP_FRAG_SECRET_INTERVAL,
};
static struct netns_frags frag_netns_state = {
	.timeout = CAPWAP_FRAG_TIMEOUT,
	.high_thresh = CAPWAP_FRAG_MAX_MEM,
	.low_thresh = CAPWAP_FRAG_PRUNE_MEM,
};
static struct socket *capwap_rcv_socket;
static int capwap_hdr_len(const struct tnl_port_config *port_config)
/* CAPWAP has neither checksums nor keys, so reject ports with those. */
if (port_config->flags & (TNL_F_CSUM | TNL_F_IN_KEY_MATCH |
			  TNL_F_OUT_KEY_ACTION))
if (port_config->in_key != 0 || port_config->out_key != 0)
static struct sk_buff *capwap_build_header(struct sk_buff *skb,
					   const struct vport *vport,
					   const struct tnl_mutable_config *mutable,
					   struct dst_entry *dst)
struct udphdr *udph = udp_hdr(skb);
struct capwaphdr *cwh = capwap_hdr(skb);
udph->source = htons(CAPWAP_SRC_PORT);
udph->dest = htons(CAPWAP_DST_PORT);
udph->len = htons(skb->len - sizeof(struct iphdr));
cwh->begin = NO_FRAG_HDR;
if (unlikely(skb->len > dst_mtu(dst)))
	skb = fragment(skb, vport, dst);
static inline struct sk_buff *process_capwap_proto(struct sk_buff *skb)
struct capwaphdr *cwh = capwap_hdr(skb);
if (likely(cwh->begin == NO_FRAG_HDR))
else if (cwh->begin == FRAG_HDR)
	return defrag(skb, false);
else if (cwh->begin == FRAG_LAST_HDR)
	return defrag(skb, true);
pr_warn("unparsable packet received on capwap socket\n");
/* Called with rcu_read_lock and BH disabled. */
static int capwap_rcv(struct sock *sk, struct sk_buff *skb)
const struct tnl_mutable_config *mutable;
if (unlikely(!pskb_may_pull(skb, CAPWAP_HLEN + ETH_HLEN)))
__skb_pull(skb, CAPWAP_HLEN);
skb_postpull_rcsum(skb, skb_transport_header(skb), CAPWAP_HLEN + ETH_HLEN);
skb = process_capwap_proto(skb);
vport = tnl_find_port(iph->daddr, iph->saddr, 0,
		      TNL_T_PROTO_CAPWAP | TNL_T_KEY_EXACT, &mutable);
if (unlikely(!vport)) {
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
struct tnl_ops capwap_tnl_ops = {
	.tunnel_type = TNL_T_PROTO_CAPWAP,
	.ipproto = IPPROTO_UDP,
	.hdr_len = capwap_hdr_len,
	.build_header = capwap_build_header,
static struct vport *capwap_create(const char *name, const void __user *config)
{
	return tnl_create(name, config, &capwap_vport_ops, &capwap_tnl_ops);
}
/* Random value. Irrelevant as long as it's not 0 since we set the handler. */
#define UDP_ENCAP_CAPWAP 10
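/*
 * Mechanism note (paraphrasing the stock UDP receive path this was written
 * against): setting a non-zero encap_type together with encap_rcv on the
 * bound socket makes the UDP stack hand every datagram arriving on that
 * socket to capwap_rcv() instead of queueing it to userspace; returning 0
 * from the handler tells the stack the packet was consumed.
 */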
static int capwap_init(void)
struct sockaddr_in sin;
err = sock_create(AF_INET, SOCK_DGRAM, 0, &capwap_rcv_socket);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = INADDR_ANY;
sin.sin_port = htons(CAPWAP_DST_PORT);
err = kernel_bind(capwap_rcv_socket, (struct sockaddr *)&sin,
		  sizeof(struct sockaddr_in));
udp_sk(capwap_rcv_socket->sk)->encap_type = UDP_ENCAP_CAPWAP;
udp_sk(capwap_rcv_socket->sk)->encap_rcv = capwap_rcv;
sock_release(capwap_rcv_socket);
pr_warn("cannot register capwap protocol handler\n");

static void capwap_exit(void)
sock_release(capwap_rcv_socket);
static void copy_skb_metadata(struct sk_buff *from, struct sk_buff *to)
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
skb_dst_set(to, dst_clone(skb_dst(from)));
to->mark = from->mark;
/* skb_set_owner_w() dereferences the socket, so only copy ownership when
 * the source skb actually has one. */
if (from->sk)
	skb_set_owner_w(to, from->sk);
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
to->ipvs_property = from->ipvs_property;
#endif
skb_copy_secmark(to, from);
static struct sk_buff *fragment(struct sk_buff *skb, const struct vport *vport,
				struct dst_entry *dst)
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
unsigned int hlen = sizeof(struct iphdr) + CAPWAP_HLEN;
unsigned int headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len;
struct sk_buff *result = NULL, *list_cur = NULL;
unsigned int remaining;
if (hlen + ~FRAG_OFF_MASK + 1 > dst_mtu(dst)) {
	pr_warn("capwap link mtu (%d) is less than minimum packet (%d)\n",
		dst_mtu(dst), hlen + ~FRAG_OFF_MASK + 1);
remaining = skb->len - hlen;
frag_id = htons(atomic_inc_return(&tnl_vport->frag_id));
struct sk_buff *skb2;
struct capwaphdr *cwh;
frag_size = min(remaining, dst_mtu(dst) - hlen);
if (remaining > frag_size)
	frag_size &= FRAG_OFF_MASK;
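/*
 * Worked example (illustrative): with a 1500 byte route MTU and
 * hlen = 20 + 8 + 8 = 36, a 2036 byte packet leaves remaining = 2000.
 * The first fragment gets frag_size = min(2000, 1464) = 1464, already a
 * multiple of 8; the second and last fragment carries the leftover
 * 536 bytes, which need not be aligned.
 */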
skb2 = alloc_skb(headroom + hlen + frag_size, GFP_ATOMIC);
skb_reserve(skb2, headroom);
__skb_put(skb2, hlen + frag_size);
skb_reset_network_header(skb2);
skb_set_transport_header(skb2, sizeof(struct iphdr));
/* Copy IP/UDP/CAPWAP header. */
copy_skb_metadata(skb, skb2);
skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
/* Copy this data chunk. */
if (skb_copy_bits(skb, hlen + offset, skb2->data + hlen, frag_size))
iph->tot_len = htons(hlen + frag_size);
udph = udp_hdr(skb2);
udph->len = htons(skb2->len - sizeof(struct iphdr));
cwh = capwap_hdr(skb2);
if (remaining > frag_size)
	cwh->begin = FRAG_HDR;
else
	cwh->begin = FRAG_LAST_HDR;
cwh->frag_id = frag_id;
cwh->frag_off = htons(offset);
list_cur->next = skb2;
result = list_cur = skb2;
remaining -= frag_size;
list_cur = result->next;
/* All of the following functions relate to fragmentation reassembly. */
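/*
 * Overview of the functions below: defrag() looks up or creates a
 * per-(saddr, daddr, frag_id) queue through the kernel's inet_frag
 * machinery, frag_queue() inserts each fragment into that queue in offset
 * order while rejecting overlaps, and once the first and last fragments plus
 * all the bytes in between have arrived, frag_reasm() stitches the queued
 * skbs back into a single packet.
 */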
static inline struct frag_queue *ifq_cast(struct inet_frag_queue *ifq)
{
	return container_of(ifq, struct frag_queue, ifq);
}
static u32 frag_hash(struct frag_match *match)
{
	return jhash_3words((__force u16)match->id, (__force u32)match->saddr,
			    (__force u32)match->daddr,
			    frag_state.rnd) & (INETFRAGS_HASHSZ - 1);
}
static struct frag_queue *queue_find(struct frag_match *match)
struct inet_frag_queue *ifq;
read_lock(&frag_state.lock);
ifq = inet_frag_find(&frag_netns_state, &frag_state, match, frag_hash(match));
/* Unlock happens inside inet_frag_find(). */
return ifq_cast(ifq);
static struct sk_buff *frag_reasm(struct frag_queue *fq, struct net_device *dev)
struct sk_buff *head = fq->ifq.fragments;
struct sk_buff *frag;
/* Succeed or fail, we're done with this queue. */
inet_frag_kill(&fq->ifq, &frag_state);
if (fq->ifq.len > 65535)
/* Can't have the head be a clone. */
if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
/*
 * We're about to build frag list for this SKB. If it already has a
 * frag list, alloc a new SKB and put the existing frag list there.
 */
if (skb_shinfo(head)->frag_list) {
	frag = alloc_skb(0, GFP_ATOMIC);
	frag->next = head->next;
	skb_shinfo(frag)->frag_list = skb_shinfo(head)->frag_list;
	skb_shinfo(head)->frag_list = NULL;
	for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
		paged_len += skb_shinfo(head)->frags[i].size;
	frag->len = frag->data_len = head->data_len - paged_len;
	head->data_len -= frag->len;
	head->len -= frag->len;
	frag->ip_summed = head->ip_summed;
	atomic_add(frag->truesize, &fq->ifq.net->mem);
skb_shinfo(head)->frag_list = head->next;
atomic_sub(head->truesize, &fq->ifq.net->mem);
/* Properly account for data in various packets. */
for (frag = head->next; frag; frag = frag->next) {
	head->data_len += frag->len;
	head->len += frag->len;
	if (head->ip_summed != frag->ip_summed)
		head->ip_summed = CHECKSUM_NONE;
	else if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_add(head->csum, frag->csum);
	head->truesize += frag->truesize;
	atomic_sub(frag->truesize, &fq->ifq.net->mem);
head->tstamp = fq->ifq.stamp;
fq->ifq.fragments = NULL;
static struct sk_buff *frag_queue(struct frag_queue *fq, struct sk_buff *skb,
				  u16 offset, bool frag_last)
struct sk_buff *prev, *next;
struct net_device *dev;
if (fq->ifq.last_in & INET_FRAG_COMPLETE)
end = offset + skb->len;
/*
 * Last fragment, shouldn't already have data past our end or
 * have another last fragment.
 */
if (end < fq->ifq.len || fq->ifq.last_in & INET_FRAG_LAST_IN)
fq->ifq.last_in |= INET_FRAG_LAST_IN;
/* Fragments should align to 8 byte chunks. */
if (end & ~FRAG_OFF_MASK)
if (end > fq->ifq.len) {
	/*
	 * Shouldn't have data past the end, if we already
	 * have the last fragment.
	 */
	if (fq->ifq.last_in & INET_FRAG_LAST_IN)
/* Find where we fit in. */
for (next = fq->ifq.fragments; next != NULL; next = next->next) {
	if (FRAG_CB(next)->offset >= offset)
/*
 * Overlapping fragments aren't allowed. We shouldn't start before
 * the end of the previous fragment.
 */
if (prev && FRAG_CB(prev)->offset + prev->len > offset)
/* We also shouldn't end after the beginning of the next fragment. */
if (next && end > FRAG_CB(next)->offset)
FRAG_CB(skb)->offset = offset;
/* Link into list. */
fq->ifq.fragments = skb;
fq->ifq.stamp = skb->tstamp;
fq->ifq.meat += skb->len;
atomic_add(skb->truesize, &fq->ifq.net->mem);
fq->ifq.last_in |= INET_FRAG_FIRST_IN;
/* If we have all fragments do reassembly. */
if (fq->ifq.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
    fq->ifq.meat == fq->ifq.len)
	return frag_reasm(fq, dev);
write_lock(&frag_state.lock);
list_move_tail(&fq->ifq.lru_list, &fq->ifq.net->lru_list);
write_unlock(&frag_state.lock);
static struct sk_buff *defrag(struct sk_buff *skb, bool frag_last)
struct iphdr *iph = ip_hdr(skb);
struct capwaphdr *cwh = capwap_hdr(skb);
struct frag_match match;
struct frag_queue *fq;
if (atomic_read(&frag_netns_state.mem) > frag_netns_state.high_thresh)
	inet_frag_evictor(&frag_netns_state, &frag_state);
match.daddr = iph->daddr;
match.saddr = iph->saddr;
match.id = cwh->frag_id;
frag_off = ntohs(cwh->frag_off) & FRAG_OFF_MASK;
fq = queue_find(&match);
spin_lock(&fq->ifq.lock);
skb = frag_queue(fq, skb, frag_off, frag_last);
spin_unlock(&fq->ifq.lock);
inet_frag_put(&fq->ifq, &frag_state);
static void defrag_init(void)
{
	inet_frags_init(&frag_state);
	inet_frags_init_net(&frag_netns_state);
}

static void defrag_exit(void)
{
	inet_frags_exit_net(&frag_netns_state, &frag_state);
	inet_frags_fini(&frag_state);
}
static void capwap_frag_init(struct inet_frag_queue *ifq, void *match_)
{
	struct frag_match *match = match_;

	ifq_cast(ifq)->match = *match;
}

static unsigned int capwap_frag_hash(struct inet_frag_queue *ifq)
{
	return frag_hash(&ifq_cast(ifq)->match);
}

static int capwap_frag_match(struct inet_frag_queue *ifq, void *a_)
{
	struct frag_match *a = a_;
	struct frag_match *b = &ifq_cast(ifq)->match;

	return a->id == b->id && a->saddr == b->saddr && a->daddr == b->daddr;
}
/* Run when the timeout for a given queue expires. */
static void capwap_frag_expire(unsigned long ifq)
{
	struct frag_queue *fq;

	fq = ifq_cast((struct inet_frag_queue *)ifq);

	spin_lock(&fq->ifq.lock);

	if (!(fq->ifq.last_in & INET_FRAG_COMPLETE))
		inet_frag_kill(&fq->ifq, &frag_state);

	spin_unlock(&fq->ifq.lock);
	inet_frag_put(&fq->ifq, &frag_state);
}
struct vport_ops capwap_vport_ops = {
	.flags = VPORT_F_GEN_STATS,
	.create = capwap_create,
	.modify = tnl_modify,
	.destroy = tnl_destroy,
	.set_mtu = tnl_set_mtu,
	.set_addr = tnl_set_addr,
	.get_name = tnl_get_name,
	.get_addr = tnl_get_addr,
	.get_dev_flags = vport_gen_get_dev_flags,
	.is_running = vport_gen_is_running,
	.get_operstate = vport_gen_get_operstate,
	.get_mtu = tnl_get_mtu,
#endif /* Linux kernel >= 2.6.26 */