tunnel: Handle hh_cache access for Linux kernel 3.1
[openvswitch] datapath/tunnel.c
1 /*
2  * Copyright (c) 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 #include <linux/if_arp.h>
10 #include <linux/if_ether.h>
11 #include <linux/ip.h>
12 #include <linux/if_vlan.h>
13 #include <linux/in.h>
14 #include <linux/in_route.h>
15 #include <linux/jhash.h>
16 #include <linux/list.h>
17 #include <linux/kernel.h>
18 #include <linux/version.h>
19 #include <linux/workqueue.h>
20 #include <linux/rculist.h>
21
22 #include <net/dsfield.h>
23 #include <net/dst.h>
24 #include <net/icmp.h>
25 #include <net/inet_ecn.h>
26 #include <net/ip.h>
27 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28 #include <net/ipv6.h>
29 #endif
30 #include <net/route.h>
31 #include <net/xfrm.h>
32
33 #include "actions.h"
34 #include "checksum.h"
35 #include "datapath.h"
36 #include "tunnel.h"
37 #include "vlan.h"
38 #include "vport.h"
39 #include "vport-generic.h"
40 #include "vport-internal_dev.h"
41
42 #ifdef NEED_CACHE_TIMEOUT
43 /*
44  * On kernels where we can't quickly detect changes in the rest of the system
45  * we use an expiration time to invalidate the cache.  A shorter expiration
46  * reduces the length of time that we may potentially blackhole packets, while
47  * a longer time improves performance by reducing how often the
48  * cache needs to be rebuilt.  A variety of factors may cause the cache to be
49  * invalidated before the expiration time, but this is the maximum.  The time
50  * is expressed in jiffies.
51  */
52 #define MAX_CACHE_EXP HZ
53 #endif
54
55 /*
56  * Interval to check for and remove caches that are no longer valid.  Caches
57  * are checked for validity before they are used for packet encapsulation and
58  * old caches are removed at that time.  However, if no packets are sent through
59  * the tunnel then the cache will never be destroyed.  Since it holds
60  * references to a number of system objects, an invalid cache keeps those
61  * objects, and the resources they use, from being freed.  The cache
62  * cleaner runs periodically to free invalid caches and does not
63  * significantly affect system performance.  A lower interval releases
64  * resources faster but itself consumes resources by requiring more
65  * frequent checks.  A longer interval may result in messages about
66  * unreleased resources being printed to the kernel message buffer.  The
67  * interval is expressed in jiffies.
68  */
69 #define CACHE_CLEANER_INTERVAL (5 * HZ)
70
71 #define CACHE_DATA_ALIGN 16
72 #define PORT_TABLE_SIZE  1024
73
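/*
 * Tunnel ports live in a fixed-size hash table of PORT_TABLE_SIZE buckets.
 * find_bucket() below picks a bucket by masking a jhash of the lookup key,
 * roughly:
 *
 *         bucket = &port_table[port_hash(key) & (PORT_TABLE_SIZE - 1)];
 *
 * which is why PORT_TABLE_SIZE must be a power of two.
 */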
74 static struct hlist_head *port_table __read_mostly;
75 static int port_table_count;
76
77 static void cache_cleaner(struct work_struct *work);
78 static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
79
80 /*
81  * These are just used as an optimization: they don't require any kind of
82  * synchronization because we could have just as easily read the value before
83  * the port change happened.
84  */
85 static unsigned int key_local_remote_ports __read_mostly;
86 static unsigned int key_remote_ports __read_mostly;
87 static unsigned int local_remote_ports __read_mostly;
88 static unsigned int remote_ports __read_mostly;
89
90 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
91 #define rt_dst(rt) (rt->dst)
92 #else
93 #define rt_dst(rt) (rt->u.dst)
94 #endif
95
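/*
 * Starting with Linux 3.1 the hardware header cache no longer hangs off the
 * dst entry; it is embedded in struct neighbour instead.  On those kernels we
 * look the neighbour up with dst_get_neighbour() and only use its hh_cache
 * when the neighbour is resolved (NUD_CONNECTED) and actually holds a cached
 * header (hh_len != 0).  Older kernels keep the hh_cache directly in the dst,
 * so rt_hh() is a plain field access there.
 */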
96 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
97 static struct hh_cache *rt_hh(struct rtable *rt)
98 {
99         struct neighbour *neigh = dst_get_neighbour(&rt->dst);
100         if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
101                         !neigh->hh.hh_len)
102                 return NULL;
103         return &neigh->hh;
104 }
105 #else
106 #define rt_hh(rt) (rt_dst(rt).hh)
107 #endif
108
109 static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
110 {
111         return vport_from_priv(tnl_vport);
112 }
113
114 /* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
115  * cache_lock is held, so it is only for update side code.
116  */
117 static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
118 {
119         return rcu_dereference_protected(tnl_vport->cache,
120                                          lockdep_is_held(&tnl_vport->cache_lock));
121 }
122
123 static inline void schedule_cache_cleaner(void)
124 {
125         schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
126 }
127
128 static void free_cache(struct tnl_cache *cache)
129 {
130         if (!cache)
131                 return;
132
133         flow_put(cache->flow);
134         ip_rt_put(cache->rt);
135         kfree(cache);
136 }
137
138 static void free_config_rcu(struct rcu_head *rcu)
139 {
140         struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
141         kfree(c);
142 }
143
144 static void free_cache_rcu(struct rcu_head *rcu)
145 {
146         struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
147         free_cache(c);
148 }
149
150 static void assign_config_rcu(struct vport *vport,
151                               struct tnl_mutable_config *new_config)
152 {
153         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
154         struct tnl_mutable_config *old_config;
155
156         old_config = rtnl_dereference(tnl_vport->mutable);
157         rcu_assign_pointer(tnl_vport->mutable, new_config);
158         call_rcu(&old_config->rcu, free_config_rcu);
159 }
160
161 static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
162 {
163         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
164         struct tnl_cache *old_cache;
165
166         old_cache = cache_dereference(tnl_vport);
167         rcu_assign_pointer(tnl_vport->cache, new_cache);
168
169         if (old_cache)
170                 call_rcu(&old_cache->rcu, free_cache_rcu);
171 }
172
173 static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
174 {
175         if (mutable->flags & TNL_F_IN_KEY_MATCH) {
176                 if (mutable->key.saddr)
177                         return &local_remote_ports;
178                 else
179                         return &remote_ports;
180         } else {
181                 if (mutable->key.saddr)
182                         return &key_local_remote_ports;
183                 else
184                         return &key_remote_ports;
185         }
186 }
187
188 static u32 port_hash(const struct port_lookup_key *key)
189 {
190         return jhash2((u32*)key, (PORT_KEY_LEN / sizeof(u32)), 0);
191 }
192
193 static inline struct hlist_head *find_bucket(u32 hash)
194 {
195         return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
196 }
197
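/*
 * The cache cleaner only needs to run while tunnel ports exist: it is
 * scheduled when the first port is added and cancelled again when the last
 * port is removed (see port_table_remove_port()).
 */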
198 static void port_table_add_port(struct vport *vport)
199 {
200         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
201         const struct tnl_mutable_config *mutable;
202         u32 hash;
203
204         if (port_table_count == 0)
205                 schedule_cache_cleaner();
206
207         mutable = rtnl_dereference(tnl_vport->mutable);
208         hash = port_hash(&mutable->key);
209         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
210         port_table_count++;
211
212         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
213 }
214
215 static void port_table_move_port(struct vport *vport,
216                       struct tnl_mutable_config *new_mutable)
217 {
218         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
219         u32 hash;
220
221         hash = port_hash(&new_mutable->key);
222         hlist_del_init_rcu(&tnl_vport->hash_node);
223         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
224
225         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
226         assign_config_rcu(vport, new_mutable);
227         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
228 }
229
230 static void port_table_remove_port(struct vport *vport)
231 {
232         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
233
234         hlist_del_init_rcu(&tnl_vport->hash_node);
235
236         port_table_count--;
237         if (port_table_count == 0)
238                 cancel_delayed_work_sync(&cache_cleaner_wq);
239
240         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
241 }
242
243 static struct vport *port_table_lookup(struct port_lookup_key *key,
244                                        const struct tnl_mutable_config **pmutable)
245 {
246         struct hlist_node *n;
247         struct hlist_head *bucket;
248         u32 hash = port_hash(key);
249         struct tnl_vport *tnl_vport;
250
251         bucket = find_bucket(hash);
252
253         hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
254                 struct tnl_mutable_config *mutable;
255
256                 mutable = rcu_dereference_rtnl(tnl_vport->mutable);
257                 if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
258                         *pmutable = mutable;
259                         return tnl_vport_to_vport(tnl_vport);
260                 }
261         }
262
263         return NULL;
264 }
265
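/*
 * Lookups go from most to least specific: an exact in_key match with a
 * configured local address, an exact in_key match with a wildcarded local
 * address, then the same two classes again for ports that accept any in_key.
 * The per-class port counters above let us skip classes with no ports at all.
 */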
266 struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
267                             int tunnel_type,
268                             const struct tnl_mutable_config **mutable)
269 {
270         struct port_lookup_key lookup;
271         struct vport *vport;
272
273         lookup.saddr = saddr;
274         lookup.daddr = daddr;
275
276         /* First try for exact match on in_key. */
277         lookup.in_key = key;
278         lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
279         if (key_local_remote_ports) {
280                 vport = port_table_lookup(&lookup, mutable);
281                 if (vport)
282                         return vport;
283         }
284         if (key_remote_ports) {
285                 lookup.saddr = 0;
286                 vport = port_table_lookup(&lookup, mutable);
287                 if (vport)
288                         return vport;
289
290                 lookup.saddr = saddr;
291         }
292
293         /* Then try matches that wildcard in_key. */
294         lookup.in_key = 0;
295         lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
296         if (local_remote_ports) {
297                 vport = port_table_lookup(&lookup, mutable);
298                 if (vport)
299                         return vport;
300         }
301         if (remote_ports) {
302                 lookup.saddr = 0;
303                 vport = port_table_lookup(&lookup, mutable);
304                 if (vport)
305                         return vport;
306         }
307
308         return NULL;
309 }
310
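/*
 * If the outer IP header was marked Congestion Experienced, propagate the
 * marking into the inner IPv4 or IPv6 header (skipping over a single VLAN
 * tag if present) so that ECN information survives decapsulation.
 */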
311 static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
312 {
313         if (unlikely(INET_ECN_is_ce(tos))) {
314                 __be16 protocol = skb->protocol;
315
316                 skb_set_network_header(skb, ETH_HLEN);
317
318                 if (protocol == htons(ETH_P_8021Q)) {
319                         if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
320                                 return;
321
322                         protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
323                         skb_set_network_header(skb, VLAN_ETH_HLEN);
324                 }
325
326                 if (protocol == htons(ETH_P_IP)) {
327                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
328                             + sizeof(struct iphdr))))
329                                 return;
330
331                         IP_ECN_set_ce(ip_hdr(skb));
332                 }
333 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
334                 else if (protocol == htons(ETH_P_IPV6)) {
335                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
336                             + sizeof(struct ipv6hdr))))
337                                 return;
338
339                         IP6_ECN_set_ce(ipv6_hdr(skb));
340                 }
341 #endif
342         }
343 }
344
345 /**
346  *      tnl_rcv - ingress point for generic tunnel code
347  *
348  * @vport: port this packet was received on
349  * @skb: received packet
350  * @tos: ToS from encapsulating IP packet, used to copy ECN bits
351  *
352  * Must be called with rcu_read_lock.
353  *
354  * Packets received by this function are in the following state:
355  * - skb->data points to the inner Ethernet header.
356  * - The inner Ethernet header is in the linear data area.
357  * - skb->csum does not include the inner Ethernet header.
358  * - The layer pointers are undefined.
359  */
360 void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
361 {
362         struct ethhdr *eh;
363
364         skb_reset_mac_header(skb);
365         eh = eth_hdr(skb);
366
367         if (likely(ntohs(eh->h_proto) >= 1536))
368                 skb->protocol = eh->h_proto;
369         else
370                 skb->protocol = htons(ETH_P_802_2);
371
372         skb_dst_drop(skb);
373         nf_reset(skb);
374         skb_clear_rxhash(skb);
375         secpath_reset(skb);
376
377         ecn_decapsulate(skb, tos);
378         vlan_set_tci(skb, 0);
379
380         if (unlikely(compute_ip_summed(skb, false))) {
381                 kfree_skb(skb);
382                 return;
383         }
384
385         vport_receive(vport, skb);
386 }
387
388 static bool check_ipv4_address(__be32 addr)
389 {
390         if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
391             || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
392                 return false;
393
394         return true;
395 }
396
397 static bool ipv4_should_icmp(struct sk_buff *skb)
398 {
399         struct iphdr *old_iph = ip_hdr(skb);
400
401         /* Don't respond to L2 broadcast. */
402         if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
403                 return false;
404
405         /* Don't respond to L3 broadcast or invalid addresses. */
406         if (!check_ipv4_address(old_iph->daddr) ||
407             !check_ipv4_address(old_iph->saddr))
408                 return false;
409
410         /* Only respond to the first fragment. */
411         if (old_iph->frag_off & htons(IP_OFFSET))
412                 return false;
413
414         /* Don't respond to ICMP error messages. */
415         if (old_iph->protocol == IPPROTO_ICMP) {
416                 u8 icmp_type, *icmp_typep;
417
418                 icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
419                                                 (old_iph->ihl << 2) +
420                                                 offsetof(struct icmphdr, type) -
421                                                 skb->data, sizeof(icmp_type),
422                                                 &icmp_type);
423
424                 if (!icmp_typep)
425                         return false;
426
427                 if (*icmp_typep > NR_ICMP_TYPES
428                         || (*icmp_typep <= ICMP_PARAMETERPROB
429                                 && *icmp_typep != ICMP_ECHOREPLY
430                                 && *icmp_typep != ICMP_ECHO))
431                         return false;
432         }
433
434         return true;
435 }
436
437 static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
438                             unsigned int mtu, unsigned int payload_length)
439 {
440         struct iphdr *iph, *old_iph = ip_hdr(skb);
441         struct icmphdr *icmph;
442         u8 *payload;
443
444         iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
445         icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
446         payload = skb_put(nskb, payload_length);
447
448         /* IP */
449         iph->version            =       4;
450         iph->ihl                =       sizeof(struct iphdr) >> 2;
451         iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
452                                         IPTOS_PREC_INTERNETCONTROL;
453         iph->tot_len            =       htons(sizeof(struct iphdr)
454                                               + sizeof(struct icmphdr)
455                                               + payload_length);
456         get_random_bytes(&iph->id, sizeof(iph->id));
457         iph->frag_off           =       0;
458         iph->ttl                =       IPDEFTTL;
459         iph->protocol           =       IPPROTO_ICMP;
460         iph->daddr              =       old_iph->saddr;
461         iph->saddr              =       old_iph->daddr;
462
463         ip_send_check(iph);
464
465         /* ICMP */
466         icmph->type             =       ICMP_DEST_UNREACH;
467         icmph->code             =       ICMP_FRAG_NEEDED;
468         icmph->un.gateway       =       htonl(mtu);
469         icmph->checksum         =       0;
470
471         nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
472         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
473                                             payload, payload_length,
474                                             nskb->csum);
475         icmph->checksum = csum_fold(nskb->csum);
476 }
477
478 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
479 static bool ipv6_should_icmp(struct sk_buff *skb)
480 {
481         struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
482         int addr_type;
483         int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
484         u8 nexthdr = ipv6_hdr(skb)->nexthdr;
485
486         /* Check source address is valid. */
487         addr_type = ipv6_addr_type(&old_ipv6h->saddr);
488         if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
489                 return false;
490
491         /* Don't reply to unspecified addresses. */
492         if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
493                 return false;
494
495         /* Don't respond to ICMP error messages. */
496         payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
497         if (payload_off < 0)
498                 return false;
499
500         if (nexthdr == NEXTHDR_ICMP) {
501                 u8 icmp_type, *icmp_typep;
502
503                 icmp_typep = skb_header_pointer(skb, payload_off +
504                                                 offsetof(struct icmp6hdr,
505                                                         icmp6_type),
506                                                 sizeof(icmp_type), &icmp_type);
507
508                 if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
509                         return false;
510         }
511
512         return true;
513 }
514
515 static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
516                             unsigned int mtu, unsigned int payload_length)
517 {
518         struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
519         struct icmp6hdr *icmp6h;
520         u8 *payload;
521
522         ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
523         icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
524         payload = skb_put(nskb, payload_length);
525
526         /* IPv6 */
527         ipv6h->version          =       6;
528         ipv6h->priority         =       0;
529         memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
530         ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
531                                               + payload_length);
532         ipv6h->nexthdr          =       NEXTHDR_ICMP;
533         ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
534         ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
535         ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);
536
537         /* ICMPv6 */
538         icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
539         icmp6h->icmp6_code      =       0;
540         icmp6h->icmp6_cksum     =       0;
541         icmp6h->icmp6_mtu       =       htonl(mtu);
542
543         nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
544         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
545                                             payload, payload_length,
546                                             nskb->csum);
547         icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
548                                                 sizeof(struct icmp6hdr)
549                                                 + payload_length,
550                                                 ipv6h->nexthdr, nskb->csum);
551 }
552 #endif /* IPv6 */
553
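/*
 * Synthesize an ICMP "fragmentation needed" (IPv4) or "packet too big"
 * (IPv6) reply addressed to the inner sender and inject it with
 * vport_receive() as if it had arrived on the tunnel port.  Returns true when
 * the oversized packet has been dealt with and should not be transmitted,
 * false when no reply could or should be generated here.
 */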
554 bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
555                      struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
556 {
557         unsigned int eth_hdr_len = ETH_HLEN;
558         unsigned int total_length = 0, header_length = 0, payload_length;
559         struct ethhdr *eh, *old_eh = eth_hdr(skb);
560         struct sk_buff *nskb;
561
562         /* Sanity check */
563         if (skb->protocol == htons(ETH_P_IP)) {
564                 if (mtu < IP_MIN_MTU)
565                         return false;
566
567                 if (!ipv4_should_icmp(skb))
568                         return true;
569         }
570 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
571         else if (skb->protocol == htons(ETH_P_IPV6)) {
572                 if (mtu < IPV6_MIN_MTU)
573                         return false;
574
575                 /*
576                  * In theory we should do PMTUD on IPv6 multicast messages, but
577                  * we don't have an address to send from, so just fragment.
578                  */
579                 if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
580                         return false;
581
582                 if (!ipv6_should_icmp(skb))
583                         return true;
584         }
585 #endif
586         else
587                 return false;
588
589         /* Allocate */
590         if (old_eh->h_proto == htons(ETH_P_8021Q))
591                 eth_hdr_len = VLAN_ETH_HLEN;
592
593         payload_length = skb->len - eth_hdr_len;
594         if (skb->protocol == htons(ETH_P_IP)) {
595                 header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
596                 total_length = min_t(unsigned int, header_length +
597                                                    payload_length, 576);
598         }
599 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
600         else {
601                 header_length = sizeof(struct ipv6hdr) +
602                                 sizeof(struct icmp6hdr);
603                 total_length = min_t(unsigned int, header_length +
604                                                   payload_length, IPV6_MIN_MTU);
605         }
606 #endif
607
608         payload_length = total_length - header_length;
609
610         nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
611                              payload_length);
612         if (!nskb)
613                 return false;
614
615         skb_reserve(nskb, NET_IP_ALIGN);
616
617         /* Ethernet / VLAN */
618         eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
619         memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
620         memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
621         nskb->protocol = eh->h_proto = old_eh->h_proto;
622         if (old_eh->h_proto == htons(ETH_P_8021Q)) {
623                 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
624
625                 vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
626                 vh->h_vlan_encapsulated_proto = skb->protocol;
627         } else
628                 vlan_set_tci(nskb, vlan_get_tci(skb));
629         skb_reset_mac_header(nskb);
630
631         /* Protocol */
632         if (skb->protocol == htons(ETH_P_IP))
633                 ipv4_build_icmp(skb, nskb, mtu, payload_length);
634 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
635         else
636                 ipv6_build_icmp(skb, nskb, mtu, payload_length);
637 #endif
638
639         /*
640          * Assume that flow based keys are symmetric with respect to input
641          * and output and use the key that we were going to put on the
642          * outgoing packet for the fake received packet.  If the keys are
643          * not symmetric then PMTUD needs to be disabled since we won't have
644          * any way of synthesizing packets.
645          */
646         if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
647             (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
648                 OVS_CB(nskb)->tun_id = flow_key;
649
650         if (unlikely(compute_ip_summed(nskb, false))) {
651                 kfree_skb(nskb);
652                 return false;
653         }
654
655         vport_receive(vport, nskb);
656
657         return true;
658 }
659
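/*
 * Decide what goes in the outer DF bit and whether the packet fits.  The
 * effective MTU is the route MTU less the Ethernet header, the tunnel header
 * and any VLAN tag; if PMTUD is enabled and the inner packet is too large
 * (and, for IPv4, has DF set), a "fragmentation needed" reply is generated
 * via tnl_frag_needed() and the caller is told to drop the packet.  The DF
 * bit chosen for the outer header is returned through *frag_offp.
 */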
660 static bool check_mtu(struct sk_buff *skb,
661                       struct vport *vport,
662                       const struct tnl_mutable_config *mutable,
663                       const struct rtable *rt, __be16 *frag_offp)
664 {
665         bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
666         bool pmtud = mutable->flags & TNL_F_PMTUD;
667         __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
668         int mtu = 0;
669         unsigned int packet_length = skb->len - ETH_HLEN;
670
671         /* Allow for one level of tagging in the packet length. */
672         if (!vlan_tx_tag_present(skb) &&
673             eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
674                 packet_length -= VLAN_HLEN;
675
676         if (pmtud) {
677                 int vlan_header = 0;
678
679                 /* The tag needs to go in the packet regardless of where it
680                  * currently is, so subtract it from the MTU.
681                  */
682                 if (vlan_tx_tag_present(skb) ||
683                     eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
684                         vlan_header = VLAN_HLEN;
685
686                 mtu = dst_mtu(&rt_dst(rt))
687                         - ETH_HLEN
688                         - mutable->tunnel_hlen
689                         - vlan_header;
690         }
691
692         if (skb->protocol == htons(ETH_P_IP)) {
693                 struct iphdr *iph = ip_hdr(skb);
694
695                 if (df_inherit)
696                         frag_off = iph->frag_off & htons(IP_DF);
697
698                 if (pmtud && iph->frag_off & htons(IP_DF)) {
699                         mtu = max(mtu, IP_MIN_MTU);
700
701                         if (packet_length > mtu &&
702                             tnl_frag_needed(vport, mutable, skb, mtu,
703                                             OVS_CB(skb)->tun_id))
704                                 return false;
705                 }
706         }
707 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
708         else if (skb->protocol == htons(ETH_P_IPV6)) {
709                 /* IPv6 routers never fragment packets, so end hosts must
710                  * handle anything above the minimum MTU themselves.
711                  */
712                 if (df_inherit && packet_length > IPV6_MIN_MTU)
713                         frag_off = htons(IP_DF);
714
715                 if (pmtud) {
716                         mtu = max(mtu, IPV6_MIN_MTU);
717
718                         if (packet_length > mtu &&
719                             tnl_frag_needed(vport, mutable, skb, mtu,
720                                             OVS_CB(skb)->tun_id))
721                                 return false;
722                 }
723         }
724 #endif
725
726         *frag_offp = frag_off;
727         return true;
728 }
729
730 static void create_tunnel_header(const struct vport *vport,
731                                  const struct tnl_mutable_config *mutable,
732                                  const struct rtable *rt, void *header)
733 {
734         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
735         struct iphdr *iph = header;
736
737         iph->version    = 4;
738         iph->ihl        = sizeof(struct iphdr) >> 2;
739         iph->frag_off   = htons(IP_DF);
740         iph->protocol   = tnl_vport->tnl_ops->ipproto;
741         iph->tos        = mutable->tos;
742         iph->daddr      = rt->rt_dst;
743         iph->saddr      = rt->rt_src;
744         iph->ttl        = mutable->ttl;
745         if (!iph->ttl)
746                 iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
747
748         tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
749 }
750
751 static inline void *get_cached_header(const struct tnl_cache *cache)
752 {
753         return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
754 }
755
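/*
 * A cached header is only usable while everything it was built from is still
 * current: the route must still have a hardware header (with the same
 * sequence number when HAVE_HH_SEQ), the routing generation id and the
 * port's mutable configuration must be unchanged, any expiration time must
 * not have passed, and a flow cached for an internal device must still be
 * alive.
 */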
756 static inline bool check_cache_valid(const struct tnl_cache *cache,
757                                      const struct tnl_mutable_config *mutable)
758 {
759         struct hh_cache *hh;
760
761         if (!cache)
762                 return false;
763
764         hh = rt_hh(cache->rt);
765         return hh &&
766 #ifdef NEED_CACHE_TIMEOUT
767                 time_before(jiffies, cache->expiration) &&
768 #endif
769 #ifdef HAVE_RT_GENID
770                 atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
771 #endif
772 #ifdef HAVE_HH_SEQ
773                 hh->hh_lock.sequence == cache->hh_seq &&
774 #endif
775                 mutable->seq == cache->mutable_seq &&
776                 (!is_internal_dev(rt_dst(cache->rt).dev) ||
777                 (cache->flow && !cache->flow->dead));
778 }
779
780 static void __cache_cleaner(struct tnl_vport *tnl_vport)
781 {
782         const struct tnl_mutable_config *mutable =
783                         rcu_dereference(tnl_vport->mutable);
784         const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
785
786         if (cache && !check_cache_valid(cache, mutable) &&
787             spin_trylock_bh(&tnl_vport->cache_lock)) {
788                 assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
789                 spin_unlock_bh(&tnl_vport->cache_lock);
790         }
791 }
792
793 static void cache_cleaner(struct work_struct *work)
794 {
795         int i;
796
797         schedule_cache_cleaner();
798
799         rcu_read_lock();
800         for (i = 0; i < PORT_TABLE_SIZE; i++) {
801                 struct hlist_node *n;
802                 struct hlist_head *bucket;
803                 struct tnl_vport  *tnl_vport;
804
805                 bucket = &port_table[i];
806                 hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
807                         __cache_cleaner(tnl_vport);
808         }
809         rcu_read_unlock();
810 }
811
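/*
 * Copy the link-layer header out of the hh_cache into the start of the
 * cached header area.  On kernels with hh seqlocks we retry until we read a
 * consistent snapshot and remember the sequence number so that
 * check_cache_valid() can notice later changes; older kernels just take the
 * read lock.
 */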
812 static inline void create_eth_hdr(struct tnl_cache *cache,
813                                   struct hh_cache *hh)
814 {
815         void *cache_data = get_cached_header(cache);
816         int hh_off;
817
818 #ifdef HAVE_HH_SEQ
819         unsigned hh_seq;
820
821         do {
822                 hh_seq = read_seqbegin(&hh->hh_lock);
823                 hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
824                 memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
825                 cache->hh_len = hh->hh_len;
826         } while (read_seqretry(&hh->hh_lock, hh_seq));
827
828         cache->hh_seq = hh_seq;
829 #else
830         read_lock(&hh->hh_lock);
831         hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
832         memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
833         cache->hh_len = hh->hh_len;
834         read_unlock(&hh->hh_lock);
835 #endif
836 }
837
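/*
 * Opportunistically precompute the complete Ethernet + outer IP (+ protocol
 * specific) header for this route.  cache_lock is only trylock'd, so if
 * another CPU is already rebuilding the cache we simply fall back to building
 * headers per packet.  When the route points back at an internal OVS device
 * we also look up and hold the flow that the encapsulated packet would hit,
 * which lets tnl_send() hand such packets straight to vport_receive().
 */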
838 static struct tnl_cache *build_cache(struct vport *vport,
839                                      const struct tnl_mutable_config *mutable,
840                                      struct rtable *rt)
841 {
842         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
843         struct tnl_cache *cache;
844         void *cache_data;
845         int cache_len;
846         struct hh_cache *hh;
847
848         if (!(mutable->flags & TNL_F_HDR_CACHE))
849                 return NULL;
850
851         /*
852          * If there is no entry in the ARP cache or if this device does not
853          * support hard header caching, just fall back to the IP stack.
854          */
855
856         hh = rt_hh(rt);
857         if (!hh)
858                 return NULL;
859
860         /*
861          * If lock is contended fall back to directly building the header.
862          * We're not going to help performance by sitting here spinning.
863          */
864         if (!spin_trylock(&tnl_vport->cache_lock))
865                 return NULL;
866
867         cache = cache_dereference(tnl_vport);
868         if (check_cache_valid(cache, mutable))
869                 goto unlock;
870         else
871                 cache = NULL;
872
873         cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen;
874
875         cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
876                         cache_len, GFP_ATOMIC);
877         if (!cache)
878                 goto unlock;
879
880         create_eth_hdr(cache, hh);
881         cache_data = get_cached_header(cache) + cache->hh_len;
882         cache->len = cache->hh_len + mutable->tunnel_hlen;
883
884         create_tunnel_header(vport, mutable, rt, cache_data);
885
886         cache->mutable_seq = mutable->seq;
887         cache->rt = rt;
888 #ifdef NEED_CACHE_TIMEOUT
889         cache->expiration = jiffies + tnl_vport->cache_exp_interval;
890 #endif
891
892         if (is_internal_dev(rt_dst(rt).dev)) {
893                 struct sw_flow_key flow_key;
894                 struct vport *dst_vport;
895                 struct sk_buff *skb;
896                 bool is_frag;
897                 int err;
898                 int flow_key_len;
899                 struct sw_flow *flow;
900
901                 dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
902                 if (!dst_vport)
903                         goto done;
904
905                 skb = alloc_skb(cache->len, GFP_ATOMIC);
906                 if (!skb)
907                         goto done;
908
909                 __skb_put(skb, cache->len);
910                 memcpy(skb->data, get_cached_header(cache), cache->len);
911
912                 err = flow_extract(skb, dst_vport->port_no, &flow_key,
913                                    &flow_key_len, &is_frag);
914
915                 consume_skb(skb);
916                 if (err || is_frag)
917                         goto done;
918
919                 flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
920                                          &flow_key, flow_key_len);
921                 if (flow) {
922                         cache->flow = flow;
923                         flow_hold(flow);
924                 }
925         }
926
927 done:
928         assign_cache_rcu(vport, cache);
929
930 unlock:
931         spin_unlock(&tnl_vport->cache_lock);
932
933         return cache;
934 }
935
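/*
 * Return a route to the tunnel destination, preferring the cached route when
 * the header cache is still valid and the requested ToS matches the
 * configured one.  Otherwise do a fresh ip_route_output_key() (the flowi
 * layout differs before and after 2.6.39) and, if the ToS allows it, rebuild
 * the header cache for the new route.
 */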
936 static struct rtable *find_route(struct vport *vport,
937                                  const struct tnl_mutable_config *mutable,
938                                  u8 tos, struct tnl_cache **cache)
939 {
940         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
941         struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
942
943         *cache = NULL;
944         tos = RT_TOS(tos);
945
946         if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
947                 *cache = cur_cache;
948                 return cur_cache->rt;
949         } else {
950                 struct rtable *rt;
951 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
952                 struct flowi fl = { .nl_u = { .ip4_u =
953                                               { .daddr = mutable->key.daddr,
954                                                 .saddr = mutable->key.saddr,
955                                                 .tos = tos } },
956                                     .proto = tnl_vport->tnl_ops->ipproto };
957
958                 if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
959                         return NULL;
960 #else
961                 struct flowi4 fl = { .daddr = mutable->key.daddr,
962                                      .saddr = mutable->key.saddr,
963                                      .flowi4_tos = tos,
964                                      .flowi4_proto = tnl_vport->tnl_ops->ipproto };
965
966                 rt = ip_route_output_key(&init_net, &fl);
967                 if (IS_ERR(rt))
968                         return NULL;
969 #endif
970
971                 if (likely(tos == mutable->tos))
972                         *cache = build_cache(vport, mutable, rt);
973
974                 return rt;
975         }
976 }
977
978 static inline bool need_linearize(const struct sk_buff *skb)
979 {
980         int i;
981
982         if (unlikely(skb_shinfo(skb)->frag_list))
983                 return true;
984
985         /*
986          * Generally speaking we should linearize if there are paged frags.
987          * However, if all of the refcounts are 1 we know nobody else can
988          * change them from underneath us and we can skip the linearization.
989          */
990         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
991                 if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
992                         return true;
993
994         return false;
995 }
996
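/*
 * Make the skb safe to encapsulate: guarantee enough headroom for the
 * link-layer plus tunnel headers, segment GSO packets in software, and
 * resolve any pending partial checksum by linearizing shared pages and
 * computing the checksum now.  Returns the (possibly new, possibly chained)
 * skb, or an ERR_PTR on failure with the original skb freed.
 */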
997 static struct sk_buff *handle_offloads(struct sk_buff *skb,
998                                        const struct tnl_mutable_config *mutable,
999                                        const struct rtable *rt)
1000 {
1001         int min_headroom;
1002         int err;
1003
1004         min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
1005                         + mutable->tunnel_hlen
1006                         + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1007
1008         if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
1009                 int head_delta = SKB_DATA_ALIGN(min_headroom -
1010                                                 skb_headroom(skb) +
1011                                                 16);
1012                 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
1013                                         0, GFP_ATOMIC);
1014                 if (unlikely(err))
1015                         goto error_free;
1016         }
1017
1018         forward_ip_summed(skb, true);
1019
1020         if (skb_is_gso(skb)) {
1021                 struct sk_buff *nskb;
1022
1023                 nskb = skb_gso_segment(skb, 0);
1024                 if (IS_ERR(nskb)) {
1025                         kfree_skb(skb);
1026                         err = PTR_ERR(nskb);
1027                         goto error;
1028                 }
1029
1030                 consume_skb(skb);
1031                 skb = nskb;
1032         } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
1033                 /* Pages aren't locked and could change at any time.
1034                  * If this happens after we compute the checksum, the
1035                  * checksum will be wrong.  We linearize now to avoid
1036                  * this problem.
1037                  */
1038                 if (unlikely(need_linearize(skb))) {
1039                         err = __skb_linearize(skb);
1040                         if (unlikely(err))
1041                                 goto error_free;
1042                 }
1043
1044                 err = skb_checksum_help(skb);
1045                 if (unlikely(err))
1046                         goto error_free;
1047         }
1048
1049         set_ip_summed(skb, OVS_CSUM_NONE);
1050
1051         return skb;
1052
1053 error_free:
1054         kfree_skb(skb);
1055 error:
1056         return ERR_PTR(err);
1057 }
1058
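/*
 * Transmit a chain of already-encapsulated fragments with ip_local_out(),
 * returning the number of payload bytes (excluding tunnel headers) that were
 * accepted by the stack.  Remaining fragments are freed after the first
 * failure.
 */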
1059 static int send_frags(struct sk_buff *skb,
1060                       const struct tnl_mutable_config *mutable)
1061 {
1062         int sent_len;
1063
1064         sent_len = 0;
1065         while (skb) {
1066                 struct sk_buff *next = skb->next;
1067                 int frag_len = skb->len - mutable->tunnel_hlen;
1068                 int err;
1069
1070                 skb->next = NULL;
1071                 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
1072
1073                 err = ip_local_out(skb);
1074                 skb = next;
1075                 if (unlikely(net_xmit_eval(err)))
1076                         goto free_frags;
1077                 sent_len += frag_len;
1078         }
1079
1080         return sent_len;
1081
1082 free_frags:
1083         /*
1084          * There's no point in continuing to send fragments once one has been
1085          * dropped, so just free the rest.  This may help relieve the congestion
1086          * that caused the first packet to be dropped.
1087          */
1088         tnl_free_linked_skbs(skb);
1089         return sent_len;
1090 }
1091
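/*
 * Transmit path: validate the inner headers, choose the outer ToS and TTL,
 * find a route (and, when possible, a prebuilt header cache), fix up
 * offloads, check the MTU, then push either the cached header or a freshly
 * built one onto each segment.  Packets whose route leads back to an
 * internal OVS device are handed to vport_receive() directly; everything
 * else goes out through dev_queue_xmit() or ip_local_out().
 */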
1092 int tnl_send(struct vport *vport, struct sk_buff *skb)
1093 {
1094         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1095         const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
1096
1097         enum vport_err_type err = VPORT_E_TX_ERROR;
1098         struct rtable *rt;
1099         struct dst_entry *unattached_dst = NULL;
1100         struct tnl_cache *cache;
1101         int sent_len = 0;
1102         __be16 frag_off = 0;
1103         u8 ttl;
1104         u8 inner_tos;
1105         u8 tos;
1106
1107         /* Validate the protocol headers before we try to use them. */
1108         if (skb->protocol == htons(ETH_P_8021Q) &&
1109             !vlan_tx_tag_present(skb)) {
1110                 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1111                         goto error_free;
1112
1113                 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1114                 skb_set_network_header(skb, VLAN_ETH_HLEN);
1115         }
1116
1117         if (skb->protocol == htons(ETH_P_IP)) {
1118                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1119                     + sizeof(struct iphdr))))
1120                         skb->protocol = 0;
1121         }
1122 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1123         else if (skb->protocol == htons(ETH_P_IPV6)) {
1124                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1125                     + sizeof(struct ipv6hdr))))
1126                         skb->protocol = 0;
1127         }
1128 #endif
1129
1130         /* ToS */
1131         if (skb->protocol == htons(ETH_P_IP))
1132                 inner_tos = ip_hdr(skb)->tos;
1133 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1134         else if (skb->protocol == htons(ETH_P_IPV6))
1135                 inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
1136 #endif
1137         else
1138                 inner_tos = 0;
1139
1140         if (mutable->flags & TNL_F_TOS_INHERIT)
1141                 tos = inner_tos;
1142         else
1143                 tos = mutable->tos;
1144
1145         tos = INET_ECN_encapsulate(tos, inner_tos);
1146
1147         /* Route lookup */
1148         rt = find_route(vport, mutable, tos, &cache);
1149         if (unlikely(!rt))
1150                 goto error_free;
1151         if (unlikely(!cache))
1152                 unattached_dst = &rt_dst(rt);
1153
1154         /* Reset SKB */
1155         nf_reset(skb);
1156         secpath_reset(skb);
1157         skb_dst_drop(skb);
1158         skb_clear_rxhash(skb);
1159
1160         /* Offloading */
1161         skb = handle_offloads(skb, mutable, rt);
1162         if (IS_ERR(skb))
1163                 goto error;
1164
1165         /* MTU */
1166         if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
1167                 err = VPORT_E_TX_DROPPED;
1168                 goto error_free;
1169         }
1170
1171         /*
1172          * If we are over the MTU, allow the IP stack to handle fragmentation.
1173          * Fragmentation is a slow path anyway.
1174          */
1175         if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1176                      cache)) {
1177                 unattached_dst = &rt_dst(rt);
1178                 dst_hold(unattached_dst);
1179                 cache = NULL;
1180         }
1181
1182         /* TTL */
1183         ttl = mutable->ttl;
1184         if (!ttl)
1185                 ttl = ip4_dst_hoplimit(&rt_dst(rt));
1186
1187         if (mutable->flags & TNL_F_TTL_INHERIT) {
1188                 if (skb->protocol == htons(ETH_P_IP))
1189                         ttl = ip_hdr(skb)->ttl;
1190 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1191                 else if (skb->protocol == htons(ETH_P_IPV6))
1192                         ttl = ipv6_hdr(skb)->hop_limit;
1193 #endif
1194         }
1195
1196         while (skb) {
1197                 struct iphdr *iph;
1198                 struct sk_buff *next_skb = skb->next;
1199                 skb->next = NULL;
1200
1201                 if (unlikely(vlan_deaccel_tag(skb)))
1202                         goto next;
1203
1204                 if (likely(cache)) {
1205                         skb_push(skb, cache->len);
1206                         memcpy(skb->data, get_cached_header(cache), cache->len);
1207                         skb_reset_mac_header(skb);
1208                         skb_set_network_header(skb, cache->hh_len);
1209
1210                 } else {
1211                         skb_push(skb, mutable->tunnel_hlen);
1212                         create_tunnel_header(vport, mutable, rt, skb->data);
1213                         skb_reset_network_header(skb);
1214
1215                         if (next_skb)
1216                                 skb_dst_set(skb, dst_clone(unattached_dst));
1217                         else {
1218                                 skb_dst_set(skb, unattached_dst);
1219                                 unattached_dst = NULL;
1220                         }
1221                 }
1222                 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1223
1224                 iph = ip_hdr(skb);
1225                 iph->tos = tos;
1226                 iph->ttl = ttl;
1227                 iph->frag_off = frag_off;
1228                 ip_select_ident(iph, &rt_dst(rt), NULL);
1229
1230                 skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
1231                 if (unlikely(!skb))
1232                         goto next;
1233
1234                 if (likely(cache)) {
1235                         int orig_len = skb->len - cache->len;
1236                         struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
1237
1238                         skb->protocol = htons(ETH_P_IP);
1239                         iph = ip_hdr(skb);
1240                         iph->tot_len = htons(skb->len - skb_network_offset(skb));
1241                         ip_send_check(iph);
1242
1243                         if (cache_vport) {
1244                                 if (unlikely(compute_ip_summed(skb, true))) {
1245                                         kfree_skb(skb);
1246                                         goto next;
1247                                 }
1248
1249                                 OVS_CB(skb)->flow = cache->flow;
1250                                 vport_receive(cache_vport, skb);
1251                                 sent_len += orig_len;
1252                         } else {
1253                                 int xmit_err;
1254
1255                                 skb->dev = rt_dst(rt).dev;
1256                                 xmit_err = dev_queue_xmit(skb);
1257
1258                                 if (likely(net_xmit_eval(xmit_err) == 0))
1259                                         sent_len += orig_len;
1260                         }
1261                 } else
1262                         sent_len += send_frags(skb, mutable);
1263
1264 next:
1265                 skb = next_skb;
1266         }
1267
1268         if (unlikely(sent_len == 0))
1269                 vport_record_error(vport, VPORT_E_TX_DROPPED);
1270
1271         goto out;
1272
1273 error_free:
1274         tnl_free_linked_skbs(skb);
1275 error:
1276         vport_record_error(vport, err);
1277 out:
1278         dst_release(unattached_dst);
1279         return sent_len;
1280 }
1281
1282 static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
1283         [OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
1284         [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
1285         [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
1286         [OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
1287         [OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
1288         [OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
1289         [OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
1290 };
1291
1292 /* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
1293 static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
1294                           const struct vport *cur_vport,
1295                           struct tnl_mutable_config *mutable)
1296 {
1297         const struct vport *old_vport;
1298         const struct tnl_mutable_config *old_mutable;
1299         struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
1300         int err;
1301
1302         if (!options)
1303                 return -EINVAL;
1304
1305         err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
1306         if (err)
1307                 return err;
1308
1309         if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
1310                 return -EINVAL;
1311
1312         mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
1313
1314         if (a[OVS_TUNNEL_ATTR_SRC_IPV4])
1315                 mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
1316         mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
1317
1318         if (a[OVS_TUNNEL_ATTR_TOS]) {
1319                 mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
1320                 if (mutable->tos != RT_TOS(mutable->tos))
1321                         return -EINVAL;
1322         }
1323
1324         if (a[OVS_TUNNEL_ATTR_TTL])
1325                 mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);
1326
1327         mutable->key.tunnel_type = tnl_ops->tunnel_type;
1328         if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
1329                 mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
1330                 mutable->flags |= TNL_F_IN_KEY_MATCH;
1331         } else {
1332                 mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
1333                 mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
1334         }
1335
1336         if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
1337                 mutable->flags |= TNL_F_OUT_KEY_ACTION;
1338         else
1339                 mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);
1340
1341         mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
1342         if (mutable->tunnel_hlen < 0)
1343                 return mutable->tunnel_hlen;
1344
1345         mutable->tunnel_hlen += sizeof(struct iphdr);
1346
1347         old_vport = port_table_lookup(&mutable->key, &old_mutable);
1348         if (old_vport && old_vport != cur_vport)
1349                 return -EEXIST;
1350
1351         return 0;
1352 }
1353
1354 struct vport *tnl_create(const struct vport_parms *parms,
1355                          const struct vport_ops *vport_ops,
1356                          const struct tnl_ops *tnl_ops)
1357 {
1358         struct vport *vport;
1359         struct tnl_vport *tnl_vport;
1360         struct tnl_mutable_config *mutable;
1361         int initial_frag_id;
1362         int err;
1363
1364         vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
1365         if (IS_ERR(vport)) {
1366                 err = PTR_ERR(vport);
1367                 goto error;
1368         }
1369
1370         tnl_vport = tnl_vport_priv(vport);
1371
1372         strcpy(tnl_vport->name, parms->name);
1373         tnl_vport->tnl_ops = tnl_ops;
1374
1375         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1376         if (!mutable) {
1377                 err = -ENOMEM;
1378                 goto error_free_vport;
1379         }
1380
1381         vport_gen_rand_ether_addr(mutable->eth_addr);
1382
1383         get_random_bytes(&initial_frag_id, sizeof(int));
1384         atomic_set(&tnl_vport->frag_id, initial_frag_id);
1385
1386         err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
1387         if (err)
1388                 goto error_free_mutable;
1389
1390         spin_lock_init(&tnl_vport->cache_lock);
1391
1392 #ifdef NEED_CACHE_TIMEOUT
1393         tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1394                                        (net_random() % (MAX_CACHE_EXP / 2));
1395 #endif
1396
1397         rcu_assign_pointer(tnl_vport->mutable, mutable);
1398
1399         port_table_add_port(vport);
1400         return vport;
1401
1402 error_free_mutable:
1403         kfree(mutable);
1404 error_free_vport:
1405         vport_free(vport);
1406 error:
1407         return ERR_PTR(err);
1408 }
1409
1410 int tnl_set_options(struct vport *vport, struct nlattr *options)
1411 {
1412         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1413         const struct tnl_mutable_config *old_mutable;
1414         struct tnl_mutable_config *mutable;
1415         int err;
1416
1417         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1418         if (!mutable) {
1419                 err = -ENOMEM;
1420                 goto error;
1421         }
1422
1423         /* Copy fields whose values should be retained. */
1424         old_mutable = rtnl_dereference(tnl_vport->mutable);
1425         mutable->seq = old_mutable->seq + 1;
1426         memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);
1427
1428         /* Parse the others configured by userspace. */
1429         err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
1430         if (err)
1431                 goto error_free;
1432
1433         if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
1434                 port_table_move_port(vport, mutable);
1435         else
1436                 assign_config_rcu(vport, mutable);
1437
1438         return 0;
1439
1440 error_free:
1441         kfree(mutable);
1442 error:
1443         return err;
1444 }
1445
1446 int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
1447 {
1448         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1449         const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
1450
1451         NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
1452         NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr);
1453
1454         if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
1455                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key);
1456         if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
1457                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
1458         if (mutable->key.saddr)
1459                 NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr);
1460         if (mutable->tos)
1461                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
1462         if (mutable->ttl)
1463                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);
1464
1465         return 0;
1466
1467 nla_put_failure:
1468         return -EMSGSIZE;
1469 }
1470
1471 static void free_port_rcu(struct rcu_head *rcu)
1472 {
1473         struct tnl_vport *tnl_vport = container_of(rcu,
1474                                                    struct tnl_vport, rcu);
1475
1476         free_cache((struct tnl_cache __force *)tnl_vport->cache);
1477         kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
1478         vport_free(tnl_vport_to_vport(tnl_vport));
1479 }
1480
1481 void tnl_destroy(struct vport *vport)
1482 {
1483         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1484         const struct tnl_mutable_config *mutable;
1485
1486         mutable = rtnl_dereference(tnl_vport->mutable);
1487         port_table_remove_port(vport);
1488         call_rcu(&tnl_vport->rcu, free_port_rcu);
1489 }
1490
1491 int tnl_set_addr(struct vport *vport, const unsigned char *addr)
1492 {
1493         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1494         struct tnl_mutable_config *mutable;
1495
1496         mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
1497                           sizeof(struct tnl_mutable_config), GFP_KERNEL);
1498         if (!mutable)
1499                 return -ENOMEM;
1500
1501         memcpy(mutable->eth_addr, addr, ETH_ALEN);
1502         assign_config_rcu(vport, mutable);
1503
1504         return 0;
1505 }
1506
1507 const char *tnl_get_name(const struct vport *vport)
1508 {
1509         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1510         return tnl_vport->name;
1511 }
1512
1513 const unsigned char *tnl_get_addr(const struct vport *vport)
1514 {
1515         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1516         return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
1517 }
1518
1519 void tnl_free_linked_skbs(struct sk_buff *skb)
1520 {
1521         while (skb) {
1522                 struct sk_buff *next = skb->next;
1523                 kfree_skb(skb);
1524                 skb = next;
1525         }
1526 }
1527
1528 int tnl_init(void)
1529 {
1530         int i;
1531
1532         port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
1533                         GFP_KERNEL);
1534         if (!port_table)
1535                 return -ENOMEM;
1536
1537         for (i = 0; i < PORT_TABLE_SIZE; i++)
1538                 INIT_HLIST_HEAD(&port_table[i]);
1539
1540         return 0;
1541 }
1542
1543 void tnl_exit(void)
1544 {
1545         int i;
1546
1547         for (i = 0; i < PORT_TABLE_SIZE; i++) {
1548                 struct tnl_vport *tnl_vport;
1549                 struct hlist_head *hash_head;
1550                 struct hlist_node *n;
1551
1552                 hash_head = &port_table[i];
1553                 hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {
1554                         BUG();
1555                         goto out;
1556                 }
1557         }
1558 out:
1559         kfree(port_table);
1560 }