datapath: Stop using NLA_PUT*().
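This commit converts the tunnel code to the nla_put_*() helpers.  The old
NLA_PUT*() macros hid a jump to a local nla_put_failure label inside the
macro body; the replacement functions return an error that the caller
checks explicitly.  A minimal sketch of the conversion pattern (the
attribute and value names are illustrative):

        /* Before: failure is a hidden goto inside the macro. */
        NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, flags);

        /* After: failure is an explicit check, as in ovs_tnl_get_options()
         * below. */
        if (nla_put_u32(skb, OVS_TUNNEL_ATTR_FLAGS, flags))
                goto nla_put_failure;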
datapath/tunnel.c
/*
 * Copyright (c) 2007-2012 Nicira Networks.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "checksum.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets while
 * a longer time increases performance by reducing the frequency that the
 * cache needs to be rebuilt.  A variety of factors may cause the cache to be
 * invalidated before the expiration time but this is the maximum.  The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif

/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent through
 * the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval will release
 * resources faster but will itself consume resources by requiring more frequent
 * checks.  A longer interval may result in messages being printed to the kernel
 * message buffer about unreleased resources.  The interval is expressed in
 * jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16
#define PORT_TABLE_SIZE  1024

static struct hlist_head *port_table __read_mostly;
static int port_table_count;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int key_multicast_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
static unsigned int multicast_ports __read_mostly;

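/* In 2.6.36 the kernel renamed struct rtable's embedded dst_entry from
 * "u.dst" to "dst"; rt_dst() papers over the difference. */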
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
static struct hh_cache *rt_hh(struct rtable *rt)
{
        struct neighbour *neigh = dst_get_neighbour_noref(&rt->dst);
        if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
                        !neigh->hh.hh_len)
                return NULL;
        return &neigh->hh;
}
#else
#define rt_hh(rt) (rt_dst(rt).hh)
#endif

static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
        return vport_from_priv(tnl_vport);
}

/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update side code.
 */
static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
        return rcu_dereference_protected(tnl_vport->cache,
                                 lockdep_is_held(&tnl_vport->cache_lock));
}

static void schedule_cache_cleaner(void)
{
        schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

static void free_cache(struct tnl_cache *cache)
{
        if (!cache)
                return;

        ovs_flow_put(cache->flow);
        ip_rt_put(cache->rt);
        kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
        struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
        kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
        struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
        free_cache(c);
}

/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
 * within an RCU callback.  Fortunately this part doesn't require waiting for
 * an RCU grace period.
 */
static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
{
        ASSERT_RTNL();
        if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
                struct in_device *in_dev;
                in_dev = inetdev_by_index(port_key_get_net(&mutable->key), mutable->mlink);
                if (in_dev)
                        ip_mc_dec_group(in_dev, mutable->key.daddr);
        }
}

static void assign_config_rcu(struct vport *vport,
                              struct tnl_mutable_config *new_config)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_config;

        old_config = rtnl_dereference(tnl_vport->mutable);
        rcu_assign_pointer(tnl_vport->mutable, new_config);

        free_mutable_rtnl(old_config);
        call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *old_cache;

        old_cache = cache_dereference(tnl_vport);
        rcu_assign_pointer(tnl_vport->cache, new_cache);

        if (old_cache)
                call_rcu(&old_cache->rcu, free_cache_rcu);
}

static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
        bool is_multicast = ipv4_is_multicast(mutable->key.daddr);

        if (mutable->flags & TNL_F_IN_KEY_MATCH) {
                if (mutable->key.saddr)
                        return &local_remote_ports;
                else if (is_multicast)
                        return &multicast_ports;
                else
                        return &remote_ports;
        } else {
                if (mutable->key.saddr)
                        return &key_local_remote_ports;
                else if (is_multicast)
                        return &key_multicast_ports;
                else
                        return &key_remote_ports;
        }
}

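/* Hashes the full lookup key as an array of u32s, so PORT_KEY_LEN must be a
 * multiple of sizeof(u32) and any padding in struct port_lookup_key must be
 * zeroed, since lookups also memcmp() the raw bytes. */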
static u32 port_hash(const struct port_lookup_key *key)
{
        return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}

static struct hlist_head *find_bucket(u32 hash)
{
        return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}

static void port_table_add_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable;
        u32 hash;

        if (port_table_count == 0)
                schedule_cache_cleaner();

        mutable = rtnl_dereference(tnl_vport->mutable);
        hash = port_hash(&mutable->key);
        hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
        port_table_count++;

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_move_port(struct vport *vport,
                      struct tnl_mutable_config *new_mutable)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        u32 hash;

        hash = port_hash(&new_mutable->key);
        hlist_del_init_rcu(&tnl_vport->hash_node);
        hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
        assign_config_rcu(vport, new_mutable);
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_remove_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

        hlist_del_init_rcu(&tnl_vport->hash_node);

        port_table_count--;
        if (port_table_count == 0)
                cancel_delayed_work_sync(&cache_cleaner_wq);

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}

static struct vport *port_table_lookup(struct port_lookup_key *key,
                                       const struct tnl_mutable_config **pmutable)
{
        struct hlist_node *n;
        struct hlist_head *bucket;
        u32 hash = port_hash(key);
        struct tnl_vport *tnl_vport;

        bucket = find_bucket(hash);

        hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
                struct tnl_mutable_config *mutable;

                mutable = rcu_dereference_rtnl(tnl_vport->mutable);
                if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
                        *pmutable = mutable;
                        return tnl_vport_to_vport(tnl_vport);
                }
        }

        return NULL;
}

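/* Tries the most specific match first: an exact in_key match with the source
 * address set, then with the source wildcarded, then the same pair for ports
 * that match any key (TNL_T_KEY_MATCH), and finally the multicast variants,
 * where the lookup is rekeyed on the multicast group address.  The global
 * per-pool counters let entire classes of lookup be skipped when no port of
 * that class exists. */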
struct vport *ovs_tnl_find_port(struct net *net, __be32 saddr, __be32 daddr,
                                __be64 key, int tunnel_type,
                                const struct tnl_mutable_config **mutable)
{
        struct port_lookup_key lookup;
        struct vport *vport;
        bool is_multicast = ipv4_is_multicast(saddr);

        port_key_set_net(&lookup, net);
        lookup.saddr = saddr;
        lookup.daddr = daddr;

        /* First try for exact match on in_key. */
        lookup.in_key = key;
        lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
        if (!is_multicast && key_local_remote_ports) {
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        if (key_remote_ports) {
                lookup.saddr = 0;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;

                lookup.saddr = saddr;
        }

        /* Then try matches that wildcard in_key. */
        lookup.in_key = 0;
        lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
        if (!is_multicast && local_remote_ports) {
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        if (remote_ports) {
                lookup.saddr = 0;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }

        if (is_multicast) {
                lookup.saddr = 0;
                lookup.daddr = saddr;
                if (key_multicast_ports) {
                        lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
                        lookup.in_key = key;
                        vport = port_table_lookup(&lookup, mutable);
                        if (vport)
                                return vport;
                }
                if (multicast_ports) {
                        lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
                        lookup.in_key = 0;
                        vport = port_table_lookup(&lookup, mutable);
                        if (vport)
                                return vport;
                }
        }

        return NULL;
}

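/* If the outer IP header arrived with CE (congestion experienced) set,
 * propagate it to the inner IPv4 or IPv6 header, skipping over a single
 * VLAN tag if present. */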
static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
{
        if (unlikely(INET_ECN_is_ce(tos))) {
                __be16 protocol = skb->protocol;

                skb_set_network_header(skb, ETH_HLEN);

                if (protocol == htons(ETH_P_8021Q)) {
                        if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                                return;

                        protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                        skb_set_network_header(skb, VLAN_ETH_HLEN);
                }

                if (protocol == htons(ETH_P_IP)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct iphdr))))
                                return;

                        IP_ECN_set_ce(ip_hdr(skb));
                }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (protocol == htons(ETH_P_IPV6)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct ipv6hdr))))
                                return;

                        IP6_ECN_set_ce(ipv6_hdr(skb));
                }
#endif
        }
}

/**
 *      ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tos: ToS from encapsulating IP packet, used to copy ECN bits
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
{
        struct ethhdr *eh;

        skb_reset_mac_header(skb);
        eh = eth_hdr(skb);

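        /* EtherType values of 1536 (0x600) and up are protocol numbers;
         * smaller values are 802.3 length fields. */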
        if (likely(ntohs(eh->h_proto) >= 1536))
                skb->protocol = eh->h_proto;
        else
                skb->protocol = htons(ETH_P_802_2);

        skb_dst_drop(skb);
        nf_reset(skb);
        skb_clear_rxhash(skb);
        secpath_reset(skb);

        ecn_decapsulate(skb, tos);
        vlan_set_tci(skb, 0);

        if (unlikely(compute_ip_summed(skb, false))) {
                kfree_skb(skb);
                return;
        }

        ovs_vport_receive(vport, skb);
}

static bool check_ipv4_address(__be32 addr)
{
        if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
            || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
                return false;

        return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
        struct iphdr *old_iph = ip_hdr(skb);

        /* Don't respond to L2 broadcast. */
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return false;

        /* Don't respond to L3 broadcast or invalid addresses. */
        if (!check_ipv4_address(old_iph->daddr) ||
            !check_ipv4_address(old_iph->saddr))
                return false;

        /* Only respond to the first fragment. */
        if (old_iph->frag_off & htons(IP_OFFSET))
                return false;

        /* Don't respond to ICMP error messages. */
        if (old_iph->protocol == IPPROTO_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
                                                (old_iph->ihl << 2) +
                                                offsetof(struct icmphdr, type) -
                                                skb->data, sizeof(icmp_type),
                                                &icmp_type);

                if (!icmp_typep)
                        return false;

                if (*icmp_typep > NR_ICMP_TYPES
                        || (*icmp_typep <= ICMP_PARAMETERPROB
                                && *icmp_typep != ICMP_ECHOREPLY
                                && *icmp_typep != ICMP_ECHO))
                        return false;
        }

        return true;
}

static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct iphdr *iph, *old_iph = ip_hdr(skb);
        struct icmphdr *icmph;
        u8 *payload;

        iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
        icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
        payload = skb_put(nskb, payload_length);

        /* IP */
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr) >> 2;
        iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
                                        IPTOS_PREC_INTERNETCONTROL;
        iph->tot_len            =       htons(sizeof(struct iphdr)
                                              + sizeof(struct icmphdr)
                                              + payload_length);
        get_random_bytes(&iph->id, sizeof(iph->id));
        iph->frag_off           =       0;
        iph->ttl                =       IPDEFTTL;
        iph->protocol           =       IPPROTO_ICMP;
        iph->daddr              =       old_iph->saddr;
        iph->saddr              =       old_iph->daddr;

        ip_send_check(iph);

        /* ICMP */
        icmph->type             =       ICMP_DEST_UNREACH;
        icmph->code             =       ICMP_FRAG_NEEDED;
        icmph->un.gateway       =       htonl(mtu);
        icmph->checksum         =       0;

        nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
        struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
        int addr_type;
        int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
        u8 nexthdr = ipv6_hdr(skb)->nexthdr;
        __be16 frag_off;

        /* Check source address is valid. */
        addr_type = ipv6_addr_type(&old_ipv6h->saddr);
        if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
                return false;

        /* Don't reply to unspecified addresses. */
        if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
                return false;

        /* Don't respond to ICMP error messages. */
        payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr, &frag_off);
        if (payload_off < 0)
                return false;

        if (nexthdr == NEXTHDR_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, payload_off +
                                                offsetof(struct icmp6hdr,
                                                        icmp6_type),
                                                sizeof(icmp_type), &icmp_type);

                if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
                        return false;
        }

        return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
        struct icmp6hdr *icmp6h;
        u8 *payload;

        ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
        icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
        payload = skb_put(nskb, payload_length);

        /* IPv6 */
        ipv6h->version          =       6;
        ipv6h->priority         =       0;
        memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
        ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
                                              + payload_length);
        ipv6h->nexthdr          =       NEXTHDR_ICMP;
        ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
        ipv6h->daddr            =       old_ipv6h->saddr;
        ipv6h->saddr            =       old_ipv6h->daddr;

        /* ICMPv6 */
        icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
        icmp6h->icmp6_code      =       0;
        icmp6h->icmp6_cksum     =       0;
        icmp6h->icmp6_mtu       =       htonl(mtu);

        nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                                sizeof(struct icmp6hdr)
                                                + payload_length,
                                                ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

bool ovs_tnl_frag_needed(struct vport *vport,
                         const struct tnl_mutable_config *mutable,
                         struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
        unsigned int eth_hdr_len = ETH_HLEN;
        unsigned int total_length = 0, header_length = 0, payload_length;
        struct ethhdr *eh, *old_eh = eth_hdr(skb);
        struct sk_buff *nskb;

        /* Sanity check */
        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU)
                        return false;

                if (!ipv4_should_icmp(skb))
                        return true;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (mtu < IPV6_MIN_MTU)
                        return false;

                /*
                 * In theory we should do PMTUD on IPv6 multicast messages, but
                 * we don't have an address to send from, so just fragment.
                 */
                if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
                        return false;

                if (!ipv6_should_icmp(skb))
                        return true;
        }
#endif
        else
                return false;

        /* Allocate */
        if (old_eh->h_proto == htons(ETH_P_8021Q))
                eth_hdr_len = VLAN_ETH_HLEN;

        payload_length = skb->len - eth_hdr_len;
        if (skb->protocol == htons(ETH_P_IP)) {
                header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
                total_length = min_t(unsigned int, header_length +
                                                   payload_length, 576);
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else {
                header_length = sizeof(struct ipv6hdr) +
                                sizeof(struct icmp6hdr);
                total_length = min_t(unsigned int, header_length +
                                                  payload_length, IPV6_MIN_MTU);
        }
#endif

        payload_length = total_length - header_length;

        nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
                             payload_length);
        if (!nskb)
                return false;

        skb_reserve(nskb, NET_IP_ALIGN);

        /* Ethernet / VLAN */
        eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
        memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
        memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
        nskb->protocol = eh->h_proto = old_eh->h_proto;
        if (old_eh->h_proto == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

                vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
                vh->h_vlan_encapsulated_proto = skb->protocol;
        } else
                vlan_set_tci(nskb, vlan_get_tci(skb));
        skb_reset_mac_header(nskb);

        /* Protocol */
        if (skb->protocol == htons(ETH_P_IP))
                ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else
                ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

        /*
         * Assume that flow-based keys are symmetric with respect to input
         * and output, and use the key that we were going to put on the
         * outgoing packet for the fake received packet.  If the keys are
         * not symmetric then PMTUD needs to be disabled, since we won't have
         * any way of synthesizing packets.
         */
        if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
            (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
                OVS_CB(nskb)->tun_id = flow_key;

        if (unlikely(compute_ip_summed(nskb, false))) {
                kfree_skb(nskb);
                return false;
        }

        ovs_vport_receive(vport, nskb);

        return true;
}

static bool check_mtu(struct sk_buff *skb,
                      struct vport *vport,
                      const struct tnl_mutable_config *mutable,
                      const struct rtable *rt, __be16 *frag_offp)
{
        bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
        bool pmtud = mutable->flags & TNL_F_PMTUD;
        __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
        int mtu = 0;
        unsigned int packet_length = skb->len - ETH_HLEN;

        /* Allow for one level of tagging in the packet length. */
        if (!vlan_tx_tag_present(skb) &&
            eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                packet_length -= VLAN_HLEN;

        if (pmtud) {
                int vlan_header = 0;

                /* The tag needs to go in the packet regardless of where it
                 * currently is, so subtract it from the MTU.
                 */
                if (vlan_tx_tag_present(skb) ||
                    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                        vlan_header = VLAN_HLEN;

                mtu = dst_mtu(&rt_dst(rt))
                        - ETH_HLEN
                        - mutable->tunnel_hlen
                        - vlan_header;
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                if (df_inherit)
                        frag_off = iph->frag_off & htons(IP_DF);

                if (pmtud && iph->frag_off & htons(IP_DF)) {
                        mtu = max(mtu, IP_MIN_MTU);

                        if (packet_length > mtu &&
                            ovs_tnl_frag_needed(vport, mutable, skb, mtu,
                                                OVS_CB(skb)->tun_id))
                                return false;
                }
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                /* IPv6 requires end hosts to do fragmentation
                 * if the packet is above the minimum MTU.
                 */
                if (df_inherit && packet_length > IPV6_MIN_MTU)
                        frag_off = htons(IP_DF);

                if (pmtud) {
                        mtu = max(mtu, IPV6_MIN_MTU);

                        if (packet_length > mtu &&
                            ovs_tnl_frag_needed(vport, mutable, skb, mtu,
                                                OVS_CB(skb)->tun_id))
                                return false;
                }
        }
#endif

        *frag_offp = frag_off;
        return true;
}

static void create_tunnel_header(const struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 const struct rtable *rt, void *header)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct iphdr *iph = header;

        iph->version    = 4;
        iph->ihl        = sizeof(struct iphdr) >> 2;
        iph->frag_off   = htons(IP_DF);
        iph->protocol   = tnl_vport->tnl_ops->ipproto;
        iph->tos        = mutable->tos;
        iph->daddr      = rt->rt_dst;
        iph->saddr      = rt->rt_src;
        iph->ttl        = mutable->ttl;
        if (!iph->ttl)
                iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));

        tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}

static void *get_cached_header(const struct tnl_cache *cache)
{
        return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

#ifdef HAVE_RT_GENID
static inline int rt_genid(struct net *net)
{
        return atomic_read(&net->ipv4.rt_genid);
}
#endif

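/* A cached header is usable only while everything it was derived from is
 * unchanged: the neighbour's hard header still exists (and, with HAVE_HH_SEQ,
 * its sequence number matches), the route generation and the port's mutable
 * config sequence are unchanged, it hasn't timed out on kernels that need a
 * timeout, and any cached flow for an internal device is still alive. */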
static bool check_cache_valid(const struct tnl_cache *cache,
                              const struct tnl_mutable_config *mutable)
{
        struct hh_cache *hh;

        if (!cache)
                return false;

        hh = rt_hh(cache->rt);
        return hh &&
#ifdef NEED_CACHE_TIMEOUT
                time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
                rt_genid(dev_net(rt_dst(cache->rt).dev)) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
                hh->hh_lock.sequence == cache->hh_seq &&
#endif
                mutable->seq == cache->mutable_seq &&
                (!ovs_is_internal_dev(rt_dst(cache->rt).dev) ||
                (cache->flow && !cache->flow->dead));
}

static void __cache_cleaner(struct tnl_vport *tnl_vport)
{
        const struct tnl_mutable_config *mutable =
                        rcu_dereference(tnl_vport->mutable);
        const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

        if (cache && !check_cache_valid(cache, mutable) &&
            spin_trylock_bh(&tnl_vport->cache_lock)) {
                assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
                spin_unlock_bh(&tnl_vport->cache_lock);
        }
}

static void cache_cleaner(struct work_struct *work)
{
        int i;

        schedule_cache_cleaner();

        rcu_read_lock();
        for (i = 0; i < PORT_TABLE_SIZE; i++) {
                struct hlist_node *n;
                struct hlist_head *bucket;
                struct tnl_vport *tnl_vport;

                bucket = &port_table[i];
                hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
                        __cache_cleaner(tnl_vport);
        }
        rcu_read_unlock();
}

static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
{
        void *cache_data = get_cached_header(cache);
        int hh_off;

#ifdef HAVE_HH_SEQ
        unsigned hh_seq;

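        /* Seqlock-style read loop: if a writer updates the cached hard
         * header while we copy it, read_seqretry() fails and we copy again. */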
        do {
                hh_seq = read_seqbegin(&hh->hh_lock);
                hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
                memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
                cache->hh_len = hh->hh_len;
        } while (read_seqretry(&hh->hh_lock, hh_seq));

        cache->hh_seq = hh_seq;
#else
        read_lock(&hh->hh_lock);
        hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
        memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
        cache->hh_len = hh->hh_len;
        read_unlock(&hh->hh_lock);
#endif
}

static struct tnl_cache *build_cache(struct vport *vport,
                                     const struct tnl_mutable_config *mutable,
                                     struct rtable *rt)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cache;
        void *cache_data;
        int cache_len;
        struct hh_cache *hh;

        if (!(mutable->flags & TNL_F_HDR_CACHE))
                return NULL;

        /*
         * If there is no entry in the ARP cache or if this device does not
         * support hard header caching, just fall back to the IP stack.
         */

        hh = rt_hh(rt);
        if (!hh)
                return NULL;

        /*
         * If the lock is contended, fall back to building the header directly.
         * We're not going to help performance by sitting here spinning.
         */
        if (!spin_trylock(&tnl_vport->cache_lock))
                return NULL;

        cache = cache_dereference(tnl_vport);
        if (check_cache_valid(cache, mutable))
                goto unlock;
        else
                cache = NULL;

        cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen;

        cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
                        cache_len, GFP_ATOMIC);
        if (!cache)
                goto unlock;

        create_eth_hdr(cache, hh);
        cache_data = get_cached_header(cache) + cache->hh_len;
        cache->len = cache->hh_len + mutable->tunnel_hlen;

        create_tunnel_header(vport, mutable, rt, cache_data);

        cache->mutable_seq = mutable->seq;
        cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
        cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

        if (ovs_is_internal_dev(rt_dst(rt).dev)) {
                struct sw_flow_key flow_key;
                struct vport *dst_vport;
                struct sk_buff *skb;
                int err;
                int flow_key_len;
                struct sw_flow *flow;

                dst_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
                if (!dst_vport)
                        goto done;

                skb = alloc_skb(cache->len, GFP_ATOMIC);
                if (!skb)
                        goto done;

                __skb_put(skb, cache->len);
                memcpy(skb->data, get_cached_header(cache), cache->len);

                err = ovs_flow_extract(skb, dst_vport->port_no, &flow_key,
                                       &flow_key_len);

                consume_skb(skb);
                if (err)
                        goto done;

                flow = ovs_flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
                                           &flow_key, flow_key_len);
                if (flow) {
                        cache->flow = flow;
                        ovs_flow_hold(flow);
                }
        }

done:
        assign_cache_rcu(vport, cache);

unlock:
        spin_unlock(&tnl_vport->cache_lock);

        return cache;
}

static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
                                   u8 ipproto, u8 tos)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
        struct flowi fl = { .nl_u = { .ip4_u = {
                                        .daddr = mutable->key.daddr,
                                        .saddr = mutable->key.saddr,
                                        .tos = tos } },
                            .proto = ipproto };
        struct rtable *rt;

        if (unlikely(ip_route_output_key(port_key_get_net(&mutable->key), &rt, &fl)))
                return ERR_PTR(-EADDRNOTAVAIL);

        return rt;
#else
        struct flowi4 fl = { .daddr = mutable->key.daddr,
                             .saddr = mutable->key.saddr,
                             .flowi4_tos = tos,
                             .flowi4_proto = ipproto };

        return ip_route_output_key(port_key_get_net(&mutable->key), &fl);
#endif
}

static struct rtable *find_route(struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 u8 tos, struct tnl_cache **cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

        *cache = NULL;
        tos = RT_TOS(tos);

        if (likely(tos == mutable->tos &&
            check_cache_valid(cur_cache, mutable))) {
                *cache = cur_cache;
                return cur_cache->rt;
        } else {
                struct rtable *rt;

                rt = __find_route(mutable, tnl_vport->tnl_ops->ipproto, tos);
                if (IS_ERR(rt))
                        return NULL;

                if (likely(tos == mutable->tos))
                        *cache = build_cache(vport, mutable, rt);

                return rt;
        }
}

static bool need_linearize(const struct sk_buff *skb)
{
        int i;

        if (unlikely(skb_shinfo(skb)->frag_list))
                return true;

        /*
         * Generally speaking we should linearize if there are paged frags.
         * However, if all of the refcounts are 1 we know nobody else can
         * change them from underneath us and we can skip the linearization.
         */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
                        return true;

        return false;
}

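/* Prepares an skb for encapsulation: guarantees enough headroom for the
 * tunnel header, segments GSO packets in software, and resolves any pending
 * partial checksum, so what we hand to the IP stack needs no further offload
 * support.  May return a list of segments linked through skb->next. */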
static struct sk_buff *handle_offloads(struct sk_buff *skb,
                                       const struct tnl_mutable_config *mutable,
                                       const struct rtable *rt)
{
        int min_headroom;
        int err;

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + mutable->tunnel_hlen
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);
                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                        0, GFP_ATOMIC);
                if (unlikely(err))
                        goto error_free;
        }

        forward_ip_summed(skb, true);

        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;

                nskb = skb_gso_segment(skb, 0);
                if (IS_ERR(nskb)) {
                        kfree_skb(skb);
                        err = PTR_ERR(nskb);
                        goto error;
                }

                consume_skb(skb);
                skb = nskb;
        } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
                /* Pages aren't locked and could change at any time.
                 * If this happens after we compute the checksum, the
                 * checksum will be wrong.  We linearize now to avoid
                 * this problem.
                 */
                if (unlikely(need_linearize(skb))) {
                        err = __skb_linearize(skb);
                        if (unlikely(err))
                                goto error_free;
                }

                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error_free;
        }

        set_ip_summed(skb, OVS_CSUM_NONE);

        return skb;

error_free:
        kfree_skb(skb);
error:
        return ERR_PTR(err);
}

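/* Hands each skb in the list (e.g. GSO segments from handle_offloads()) to
 * ip_local_out() and returns the number of payload bytes sent, not counting
 * the tunnel header.  On the first transmit error the remaining segments are
 * dropped. */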
static int send_frags(struct sk_buff *skb,
                      const struct tnl_mutable_config *mutable)
{
        int sent_len;

        sent_len = 0;
        while (skb) {
                struct sk_buff *next = skb->next;
                int frag_len = skb->len - mutable->tunnel_hlen;
                int err;

                skb->next = NULL;
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

                err = ip_local_out(skb);
                skb = next;
                if (unlikely(net_xmit_eval(err)))
                        goto free_frags;
                sent_len += frag_len;
        }

        return sent_len;

free_frags:
        /*
         * There's no point in continuing to send fragments once one has been
         * dropped, so just free the rest.  This may help relieve the congestion
         * that caused the first packet to be dropped.
         */
        ovs_tnl_free_linked_skbs(skb);
        return sent_len;
}

int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

        enum vport_err_type err = VPORT_E_TX_ERROR;
        struct rtable *rt;
        struct dst_entry *unattached_dst = NULL;
        struct tnl_cache *cache;
        int sent_len = 0;
        __be16 frag_off = 0;
        u8 ttl;
        u8 inner_tos;
        u8 tos;

        /* Validate the protocol headers before we try to use them. */
        if (skb->protocol == htons(ETH_P_8021Q) &&
            !vlan_tx_tag_present(skb)) {
                if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                        goto error_free;

                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, VLAN_ETH_HLEN);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct iphdr))))
                        skb->protocol = 0;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct ipv6hdr))))
                        skb->protocol = 0;
        }
#endif

        /* ToS */
        if (skb->protocol == htons(ETH_P_IP))
                inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6))
                inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
        else
                inner_tos = 0;

        if (mutable->flags & TNL_F_TOS_INHERIT)
                tos = inner_tos;
        else
                tos = mutable->tos;

        tos = INET_ECN_encapsulate(tos, inner_tos);

        /* Route lookup */
        rt = find_route(vport, mutable, tos, &cache);
        if (unlikely(!rt))
                goto error_free;
        if (unlikely(!cache))
                unattached_dst = &rt_dst(rt);

        /* Reset SKB */
        nf_reset(skb);
        secpath_reset(skb);
        skb_dst_drop(skb);
        skb_clear_rxhash(skb);

        /* Offloading */
        skb = handle_offloads(skb, mutable, rt);
        if (IS_ERR(skb))
                goto error;

        /* MTU */
        if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
                err = VPORT_E_TX_DROPPED;
                goto error_free;
        }

        /*
         * If we are over the MTU, allow the IP stack to handle fragmentation.
         * Fragmentation is a slow path anyway.
         */
        if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
                     cache)) {
                unattached_dst = &rt_dst(rt);
                dst_hold(unattached_dst);
                cache = NULL;
        }

        /* TTL */
        ttl = mutable->ttl;
        if (!ttl)
                ttl = ip4_dst_hoplimit(&rt_dst(rt));

        if (mutable->flags & TNL_F_TTL_INHERIT) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ipv6_hdr(skb)->hop_limit;
#endif
        }

        while (skb) {
                struct iphdr *iph;
                struct sk_buff *next_skb = skb->next;
                skb->next = NULL;

                if (unlikely(vlan_deaccel_tag(skb)))
                        goto next;

                if (likely(cache)) {
                        skb_push(skb, cache->len);
                        memcpy(skb->data, get_cached_header(cache), cache->len);
                        skb_reset_mac_header(skb);
                        skb_set_network_header(skb, cache->hh_len);

                } else {
                        skb_push(skb, mutable->tunnel_hlen);
                        create_tunnel_header(vport, mutable, rt, skb->data);
                        skb_reset_network_header(skb);

                        if (next_skb)
                                skb_dst_set(skb, dst_clone(unattached_dst));
                        else {
                                skb_dst_set(skb, unattached_dst);
                                unattached_dst = NULL;
                        }
                }
                skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

                iph = ip_hdr(skb);
                iph->tos = tos;
                iph->ttl = ttl;
                iph->frag_off = frag_off;
                ip_select_ident(iph, &rt_dst(rt), NULL);

                skb = tnl_vport->tnl_ops->update_header(vport, mutable,
                                                        &rt_dst(rt), skb);
                if (unlikely(!skb))
                        goto next;

                if (likely(cache)) {
                        int orig_len = skb->len - cache->len;
                        struct vport *cache_vport;

                        cache_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
                        skb->protocol = htons(ETH_P_IP);
                        iph = ip_hdr(skb);
                        iph->tot_len = htons(skb->len - skb_network_offset(skb));
                        ip_send_check(iph);

                        if (cache_vport) {
                                if (unlikely(compute_ip_summed(skb, true))) {
                                        kfree_skb(skb);
                                        goto next;
                                }

                                OVS_CB(skb)->flow = cache->flow;
                                ovs_vport_receive(cache_vport, skb);
                                sent_len += orig_len;
                        } else {
                                int xmit_err;

                                skb->dev = rt_dst(rt).dev;
                                xmit_err = dev_queue_xmit(skb);

                                if (likely(net_xmit_eval(xmit_err) == 0))
                                        sent_len += orig_len;
                        }
                } else
                        sent_len += send_frags(skb, mutable);

next:
                skb = next_skb;
        }

        if (unlikely(sent_len == 0))
                ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);

        goto out;

error_free:
        ovs_tnl_free_linked_skbs(skb);
error:
        ovs_vport_record_error(vport, err);
out:
        dst_release(unattached_dst);
        return sent_len;
}

static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
        [OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
        [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
        [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
        [OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
        [OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
        [OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
        [OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
};

/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
 * zeroed. */
static int tnl_set_config(struct net *net, struct nlattr *options,
                          const struct tnl_ops *tnl_ops,
                          const struct vport *cur_vport,
                          struct tnl_mutable_config *mutable)
{
        const struct vport *old_vport;
        const struct tnl_mutable_config *old_mutable;
        struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
        int err;

        if (!options)
                return -EINVAL;

        err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
        if (err)
                return err;

        if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
                return -EINVAL;

        mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;

        port_key_set_net(&mutable->key, net);
        mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
        if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
                if (ipv4_is_multicast(mutable->key.daddr))
                        return -EINVAL;
                mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
        }

        if (a[OVS_TUNNEL_ATTR_TOS]) {
                mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
                if (mutable->tos != RT_TOS(mutable->tos))
                        return -EINVAL;
        }

        if (a[OVS_TUNNEL_ATTR_TTL])
                mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);

        mutable->key.tunnel_type = tnl_ops->tunnel_type;
        if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
                mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
                mutable->flags |= TNL_F_IN_KEY_MATCH;
        } else {
                mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
                mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
        }

        if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
                mutable->flags |= TNL_F_OUT_KEY_ACTION;
        else
                mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);

        mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
        if (mutable->tunnel_hlen < 0)
                return mutable->tunnel_hlen;

        mutable->tunnel_hlen += sizeof(struct iphdr);

        old_vport = port_table_lookup(&mutable->key, &old_mutable);
        if (old_vport && old_vport != cur_vport)
                return -EEXIST;

        mutable->mlink = 0;
        if (ipv4_is_multicast(mutable->key.daddr)) {
                struct net_device *dev;
                struct rtable *rt;

                rt = __find_route(mutable, tnl_ops->ipproto, mutable->tos);
                if (IS_ERR(rt))
                        return -EADDRNOTAVAIL;
                dev = rt_dst(rt).dev;
                ip_rt_put(rt);
                if (__in_dev_get_rtnl(dev) == NULL)
                        return -EADDRNOTAVAIL;
                mutable->mlink = dev->ifindex;
                ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
        }

        return 0;
}

struct vport *ovs_tnl_create(const struct vport_parms *parms,
                             const struct vport_ops *vport_ops,
                             const struct tnl_ops *tnl_ops)
{
        struct vport *vport;
        struct tnl_vport *tnl_vport;
        struct tnl_mutable_config *mutable;
        int initial_frag_id;
        int err;

        vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }

        tnl_vport = tnl_vport_priv(vport);

        strcpy(tnl_vport->name, parms->name);
        tnl_vport->tnl_ops = tnl_ops;

        mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error_free_vport;
        }

        random_ether_addr(mutable->eth_addr);

        get_random_bytes(&initial_frag_id, sizeof(int));
        atomic_set(&tnl_vport->frag_id, initial_frag_id);

        err = tnl_set_config(ovs_dp_get_net(parms->dp), parms->options, tnl_ops,
                             NULL, mutable);
        if (err)
                goto error_free_mutable;

        spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
        tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
                                       (net_random() % (MAX_CACHE_EXP / 2));
#endif

        rcu_assign_pointer(tnl_vport->mutable, mutable);

        port_table_add_port(vport);
        return vport;

error_free_mutable:
        free_mutable_rtnl(mutable);
        kfree(mutable);
error_free_vport:
        ovs_vport_free(vport);
error:
        return ERR_PTR(err);
}

int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *old_mutable;
        struct tnl_mutable_config *mutable;
        int err;

        mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error;
        }

        /* Copy fields whose values should be retained. */
        old_mutable = rtnl_dereference(tnl_vport->mutable);
        mutable->seq = old_mutable->seq + 1;
        memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

        /* Parse the others configured by userspace. */
        err = tnl_set_config(ovs_dp_get_net(vport->dp), options, tnl_vport->tnl_ops,
                             vport, mutable);
        if (err)
                goto error_free;

        if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
                port_table_move_port(vport, mutable);
        else
                assign_config_rcu(vport, mutable);

        return 0;

error_free:
        free_mutable_rtnl(mutable);
        kfree(mutable);
error:
        return err;
}

int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

        if (nla_put_u32(skb, OVS_TUNNEL_ATTR_FLAGS,
                      mutable->flags & TNL_F_PUBLIC) ||
            nla_put_be32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr))
                goto nla_put_failure;

        if (!(mutable->flags & TNL_F_IN_KEY_MATCH) &&
            nla_put_be64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key))
                goto nla_put_failure;
        if (!(mutable->flags & TNL_F_OUT_KEY_ACTION) &&
            nla_put_be64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key))
                goto nla_put_failure;
        if (mutable->key.saddr &&
            nla_put_be32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr))
                goto nla_put_failure;
        if (mutable->tos && nla_put_u8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos))
                goto nla_put_failure;
        if (mutable->ttl && nla_put_u8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl))
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static void free_port_rcu(struct rcu_head *rcu)
{
        struct tnl_vport *tnl_vport = container_of(rcu,
                                                   struct tnl_vport, rcu);

        free_cache((struct tnl_cache __force *)tnl_vport->cache);
        kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
        ovs_vport_free(tnl_vport_to_vport(tnl_vport));
}

void ovs_tnl_destroy(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;

        mutable = rtnl_dereference(tnl_vport->mutable);
        port_table_remove_port(vport);
        free_mutable_rtnl(mutable);
        call_rcu(&tnl_vport->rcu, free_port_rcu);
}

int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_mutable, *mutable;

        old_mutable = rtnl_dereference(tnl_vport->mutable);
        mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        old_mutable->mlink = 0;

        memcpy(mutable->eth_addr, addr, ETH_ALEN);
        assign_config_rcu(vport, mutable);

        return 0;
}

const char *ovs_tnl_get_name(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return tnl_vport->name;
}

const unsigned char *ovs_tnl_get_addr(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}

void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
{
        while (skb) {
                struct sk_buff *next = skb->next;
                kfree_skb(skb);
                skb = next;
        }
}

int ovs_tnl_init(void)
{
        int i;

        port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
                             GFP_KERNEL);
        if (!port_table)
                return -ENOMEM;

        for (i = 0; i < PORT_TABLE_SIZE; i++)
                INIT_HLIST_HEAD(&port_table[i]);

        return 0;
}

void ovs_tnl_exit(void)
{
        kfree(port_table);
}