/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <net/dsfield.h>
#include <net/inet_ecn.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include "vport-generic.h"
#include "vport-internal_dev.h"
#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets while
 * a longer time increases performance by reducing the frequency that the
 * cache needs to be rebuilt.  A variety of factors may cause the cache to be
 * invalidated before the expiration time, but this is the maximum.  The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif
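
/*
 * To keep a burst of ports created at the same time from all expiring their
 * caches in the same jiffy, tnl_create() below staggers each port's actual
 * timeout by subtracting a random amount of up to MAX_CACHE_EXP / 2.
 */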
/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent
 * through the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval will release
 * resources faster but will itself consume resources by requiring more
 * frequent checks.  A longer interval may result in messages being printed
 * to the kernel message buffer about unreleased resources.  The interval is
 * expressed in jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)
#define CACHE_DATA_ALIGN 16
#define PORT_TABLE_SIZE 1024
static struct hlist_head *port_table __read_mostly;
static int port_table_count;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
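
/*
 * Each counter above tracks the number of ports in one lookup class: keyed
 * vs. flow-based key, with vs. without a local (source) address configured.
 * tnl_find_port() consults the counters so that it can skip hashing and
 * probing the table entirely for any class that currently has no ports.
 */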
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif
static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}
/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update side code.
 */
static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
	return rcu_dereference_protected(tnl_vport->cache,
					 lockdep_is_held(&tnl_vport->cache_lock));
}
static inline void schedule_cache_cleaner(void)
{
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}
static void free_cache(struct tnl_cache *cache)
{
	if (!cache)
		return;

	flow_put(cache->flow);
	ip_rt_put(cache->rt);
	kfree(cache);
}
static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
	kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
	free_cache(c);
}
static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}
static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = cache_dereference(tnl_vport);
	rcu_assign_pointer(tnl_vport->cache, new_cache);

	if (old_cache)
		call_rcu(&old_cache->rcu, free_cache_rcu);
}
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	if (mutable->flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->saddr)
			return &local_remote_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->saddr)
			return &key_local_remote_ports;
		else
			return &key_remote_ports;
	}
}
struct port_lookup_key {
	const struct tnl_mutable_config *mutable;
	__be64 key;
	u32 tunnel_type;
	__be32 saddr;
	__be32 daddr;
};
/*
 * Modifies 'lookup' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tnl_vport *tnl_vport,
		    struct port_lookup_key *lookup)
{
	lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
		lookup->mutable->daddr == lookup->daddr &&
		lookup->mutable->in_key == lookup->key &&
		lookup->mutable->saddr == lookup->saddr);
}
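
/*
 * Hashes the lookup key into a port table index.  jhash works on 32-bit
 * words, so the 64-bit tunnel key is folded in as two words (high half,
 * then low half), seeded with the hash of the addresses and tunnel type.
 */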
static u32 port_hash(struct port_lookup_key *k)
{
	u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
			     k->tunnel_type, 0);
	return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);
}
static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
	struct port_lookup_key lookup;

	lookup.saddr = mutable->saddr;
	lookup.daddr = mutable->daddr;
	lookup.key = mutable->in_key;
	lookup.tunnel_type = mutable->tunnel_type;

	return port_hash(&lookup);
}
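
/*
 * PORT_TABLE_SIZE is a power of two, so masking with PORT_TABLE_SIZE - 1
 * is equivalent to (but cheaper than) taking the hash modulo the table size.
 */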
static inline struct hlist_head *find_bucket(u32 hash)
{
	return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}
static void port_table_add_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash = mutable_hash(rtnl_dereference(tnl_vport->mutable));

	if (port_table_count == 0)
		schedule_cache_cleaner();

	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
	port_table_count++;

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}
static void port_table_move_port(struct vport *vport,
				 struct tnl_mutable_config *new_mutable)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = mutable_hash(new_mutable);
	hlist_del_init_rcu(&tnl_vport->hash_node);
	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}
static void port_table_remove_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	hlist_del_init_rcu(&tnl_vport->hash_node);

	port_table_count--;
	if (port_table_count == 0)
		cancel_delayed_work_sync(&cache_cleaner_wq);

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}
static struct tnl_vport *port_table_lookup(struct port_lookup_key *lookup)
{
	struct hlist_node *n;
	struct hlist_head *bucket;
	u32 hash = port_hash(lookup);
	struct tnl_vport *tnl_vport;

	bucket = find_bucket(hash);

	hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
		if (port_cmp(tnl_vport, lookup))
			return tnl_vport;
	}

	return NULL;
}
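
/*
 * Looks up the best matching port for a received packet.  More specific
 * matches take precedence: an exact key match with a configured local
 * address wins over an exact key match bound to any local address, which
 * in turn wins over the corresponding flow-based (TNL_T_KEY_MATCH) ports.
 * On success, '*mutable' is set to the matching port's configuration.
 */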
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
			    int tunnel_type,
			    const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct tnl_vport *tnl_vport;

	lookup.saddr = saddr;
	lookup.daddr = daddr;

	if (tunnel_type & TNL_T_KEY_EXACT) {
		lookup.key = key;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

		if (key_local_remote_ports) {
			tnl_vport = port_table_lookup(&lookup);
			if (tnl_vport)
				goto found;
		}

		if (key_remote_ports) {
			lookup.saddr = 0;
			tnl_vport = port_table_lookup(&lookup);
			if (tnl_vport)
				goto found;

			lookup.saddr = saddr;
		}
	}

	if (tunnel_type & TNL_T_KEY_MATCH) {
		lookup.key = 0;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

		if (local_remote_ports) {
			tnl_vport = port_table_lookup(&lookup);
			if (tnl_vport)
				goto found;
		}

		if (remote_ports) {
			lookup.saddr = 0;
			tnl_vport = port_table_lookup(&lookup);
			if (tnl_vport)
				goto found;
		}
	}

	return NULL;

found:
	*mutable = lookup.mutable;
	return tnl_vport_to_vport(tnl_vport);
}
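
/*
 * In the spirit of RFC 3168: if the outer IP header arrived with Congestion
 * Experienced set, that marking must be propagated to the inner header when
 * the tunnel is removed, or the congestion signal would be lost on decap.
 */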
static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
{
	if (unlikely(INET_ECN_is_ce(tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce(ip_hdr(skb));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
#endif
	}
}
/**
 *	tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tos: ToS from encapsulating IP packet, used to copy ECN bits
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	nf_reset(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);

	ecn_decapsulate(skb, tos);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	vport_receive(vport, skb);
}
static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}
static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);

		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}
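
/*
 * Builds the IP and ICMP "fragmentation needed" headers in 'nskb', followed
 * by 'payload_length' bytes quoted from the original packet.  The ICMP
 * checksum is accumulated in two steps: csum_partial() over the ICMP header,
 * then skb_copy_and_csum_bits(), which copies the quoted payload out of
 * 'skb' and folds it into the running checksum in a single pass.
 */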
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
			     + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off = 0;
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);
	icmph->checksum = 0;

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							 icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}
static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version = 6;
	ipv6h->priority = 0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
				   + payload_length);
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	/* ICMPv6 */
	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					      sizeof(struct icmp6hdr)
					      + payload_length,
					      ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */
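
/*
 * Synthesizes the ICMP (or ICMPv6) "packet too big" message that a router on
 * the path would have sent, and hands it to the datapath as if it had been
 * received on 'vport'.  This lets PMTUD work for the tunnel endpoints even
 * though the encapsulation overhead is invisible to them.
 */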
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages but
		 * we don't have an address to send from so just fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, IPV6_MIN_MTU);
	}
#endif

	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	} else
		vlan_set_tci(nskb, vlan_get_tci(skb));
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	/*
	 * Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet.  If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets.
	 */
	if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;

	if (unlikely(compute_ip_summed(nskb, false))) {
		kfree_skb(nskb);
		return false;
	}

	vport_receive(vport, nskb);

	return true;
}
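
/*
 * Checks the packet against the tunnel MTU and picks the outer DF bit.
 * With PMTUD enabled, the usable inner MTU is the route MTU minus the outer
 * Ethernet header, the tunnel header, and any VLAN tag that must be
 * reinserted.  For example (figures assumed for illustration): a 1500-byte
 * route MTU with a 38-byte tunnel header and a VLAN tag leaves
 * 1500 - 14 - 38 - 4 = 1444 bytes for the inner packet.
 */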
static bool check_mtu(struct sk_buff *skb,
		      struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp)
{
	bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
	bool pmtud = mutable->flags & TNL_F_PMTUD;
	__be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
	unsigned int mtu = 0;
	unsigned int packet_length = skb->len - ETH_HLEN;

	/* Allow for one level of tagging in the packet length. */
	if (!vlan_tx_tag_present(skb) &&
	    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		packet_length -= VLAN_HLEN;

	if (pmtud) {
		int vlan_header = 0;

		/* The tag needs to go in the packet regardless of where it
		 * currently is, so subtract it from the MTU.
		 */
		if (vlan_tx_tag_present(skb) ||
		    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
			vlan_header = VLAN_HLEN;

		mtu = dst_mtu(&rt_dst(rt))
			- ETH_HLEN
			- mutable->tunnel_hlen
			- vlan_header;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		if (df_inherit)
			frag_off = iph->frag_off & htons(IP_DF);

		if (pmtud && iph->frag_off & htons(IP_DF)) {
			mtu = max(mtu, IP_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		/* IPv6 requires end hosts to do fragmentation
		 * if the packet is above the minimum MTU.
		 */
		if (df_inherit && packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (pmtud) {
			mtu = max(mtu, IPV6_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#endif

	*frag_offp = frag_off;
	return true;
}
static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct rtable *rt, void *header)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = htons(IP_DF);
	iph->protocol = tnl_vport->tnl_ops->ipproto;
	iph->tos = mutable->tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = mutable->ttl;
	if (!iph->ttl)
		iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));

	tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}
static inline void *get_cached_header(const struct tnl_cache *cache)
{
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}
static inline bool check_cache_valid(const struct tnl_cache *cache,
				     const struct tnl_mutable_config *mutable)
{
	return cache &&
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
		atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
		rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
		mutable->seq == cache->mutable_seq &&
		(!is_internal_dev(rt_dst(cache->rt).dev) ||
		(cache->flow && !cache->flow->dead));
}
static void __cache_cleaner(struct tnl_vport *tnl_vport)
{
	const struct tnl_mutable_config *mutable =
			rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);
	}
}
static void cache_cleaner(struct work_struct *work)
{
	int i;

	schedule_cache_cleaner();

	rcu_read_lock();
	for (i = 0; i < PORT_TABLE_SIZE; i++) {
		struct hlist_node *n;
		struct hlist_head *bucket;
		struct tnl_vport *tnl_vport;

		bucket = &port_table[i];
		hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
			__cache_cleaner(tnl_vport);
	}
	rcu_read_unlock();
}
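
/*
 * Copies the cached link-layer (ARP) header into the header cache.  On
 * kernels with hh_lock sequence counters, the copy is retried until it
 * reads a consistent snapshot, and the sequence number is saved so that
 * check_cache_valid() can notice if the ARP entry changes later.
 */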
static inline void create_eth_hdr(struct tnl_cache *cache,
				  const struct rtable *rt)
{
	void *cache_data = get_cached_header(cache);
	int hh_len = rt_dst(rt).hh->hh_len;
	int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
	unsigned hh_seq;

	do {
		hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
		memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	} while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;
#else
	read_lock_bh(&rt_dst(rt).hh->hh_lock);
	memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	read_unlock_bh(&rt_dst(rt).hh->hh_lock);
#endif
}
static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
				     struct rtable *rt)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cache;
	void *cache_data;
	int cache_len;

	if (!(mutable->flags & TNL_F_HDR_CACHE))
		return NULL;

	/*
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching just fall back to the IP stack.
	 */
	if (!rt_dst(rt).hh)
		return NULL;

	/*
	 * If lock is contended fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
	 */
	if (!spin_trylock_bh(&tnl_vport->cache_lock))
		return NULL;

	cache = cache_dereference(tnl_vport);
	if (check_cache_valid(cache, mutable))
		goto unlock;
	cache = NULL;

	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);
	if (!cache)
		goto unlock;

	cache->len = cache_len;

	create_eth_hdr(cache, rt);
	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

	create_tunnel_header(vport, mutable, rt, cache_data);

	cache->mutable_seq = mutable->seq;
	cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

	if (is_internal_dev(rt_dst(rt).dev)) {
		struct sw_flow_key flow_key;
		struct vport *dst_vport;
		struct sk_buff *skb;
		bool is_frag;
		int err;
		int flow_key_len;
		struct sw_flow *flow;

		dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
		if (!dst_vport)
			goto done;

		skb = alloc_skb(cache->len, GFP_ATOMIC);
		if (!skb)
			goto done;

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = flow_extract(skb, dst_vport->port_no, &flow_key,
				   &flow_key_len, &is_frag);

		kfree_skb(skb);
		if (err || is_frag)
			goto done;

		flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
				       &flow_key, flow_key_len);
		if (flow) {
			cache->flow = flow;
			flow_hold(flow);
		}
	}

done:
	assign_cache_rcu(vport, cache);

unlock:
	spin_unlock_bh(&tnl_vport->cache_lock);

	return cache;
}
static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 u8 tos, struct tnl_cache **cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	*cache = NULL;

	if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
		*cache = cur_cache;
		return cur_cache->rt;
	} else {
		struct rtable *rt;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
		struct flowi fl = { .nl_u = { .ip4_u =
				    { .daddr = mutable->daddr,
				      .saddr = mutable->saddr,
				      .tos = RT_TOS(tos) } },
				    .proto = tnl_vport->tnl_ops->ipproto };

		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
			return NULL;
#else
		struct flowi4 fl = { .daddr = mutable->daddr,
				     .saddr = mutable->saddr,
				     .flowi4_tos = RT_TOS(tos),
				     .flowi4_proto = tnl_vport->tnl_ops->ipproto };

		rt = ip_route_output_key(&init_net, &fl);
		if (IS_ERR(rt))
			return NULL;
#endif

		if (likely(tos == mutable->tos))
			*cache = build_cache(vport, mutable, rt);

		return rt;
	}
}
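
/*
 * Decides whether a socket buffer must be linearized before software
 * checksumming.  A page shared with anyone else (refcount above one) could
 * be rewritten between checksum computation and transmission.
 */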
static inline bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
			return true;

	return false;
}
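
/*
 * Prepares a packet for encapsulation: guarantees enough headroom for the
 * outer headers, performs GSO segmentation in software since the hardware
 * cannot segment the inner packet once it is encapsulated, and resolves any
 * pending partial checksums for the same reason.  Returns the (possibly
 * replaced) skb, or an ERR_PTR on failure.
 */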
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt)
{
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ mutable->tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto error_free;
	}

	forward_ip_summed(skb, true);

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		nskb = skb_gso_segment(skb, 0);
		kfree_skb(skb);
		if (IS_ERR(nskb)) {
			err = PTR_ERR(nskb);
			goto error;
		}

		skb = nskb;
	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error_free;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error_free;
	}

	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}
static int send_frags(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable)
{
	int sent_len;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - mutable->tunnel_hlen;
		int err;

		skb->next = NULL;
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);

		skb = next;
		if (unlikely(net_xmit_eval(err)))
			goto free_frags;
		sent_len += frag_len;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped so just free the rest.  This may help improve the congestion
	 * that caused the first packet to be dropped.
	 */
	tnl_free_linked_skbs(skb);
	return sent_len;
}
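
/*
 * Egress path for all tunnel types.  When a valid header cache exists, the
 * precomputed Ethernet + IP + tunnel header is prepended with a single
 * memcpy() and the packet is handed straight to an internal device or queued
 * on the output device.  Otherwise the headers are built from scratch and
 * the packet goes through the IP stack via send_frags(), which also handles
 * any fragments the stack produced.
 */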
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;
	int sent_len = 0;
	__be16 frag_off = 0;
	u8 ttl;
	u8 inner_tos;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !vlan_tx_tag_present(skb)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* ToS */
	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	else
		inner_tos = 0;

	if (mutable->flags & TNL_F_TOS_INHERIT)
		tos = inner_tos;
	else
		tos = mutable->tos;

	tos = INET_ECN_encapsulate(tos, inner_tos);

	/* Route lookup */
	rt = find_route(vport, mutable, tos, &cache);
	if (unlikely(!rt))
		goto error_free;
	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	skb_clear_rxhash(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt);
	if (IS_ERR(skb))
		goto error;

	/* MTU */
	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}

	/*
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
	 */
	if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
		     cache)) {
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);
		cache = NULL;
	}

	/* TTL */
	ttl = mutable->ttl;
	if (!ttl)
		ttl = ip4_dst_hoplimit(&rt_dst(rt));

	if (mutable->flags & TNL_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ipv6_hdr(skb)->hop_limit;
#endif
	}

	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		if (unlikely(vlan_deaccel_tag(skb)))
			goto next;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, rt_dst(rt).hh->hh_len);
		} else {
			skb_push(skb, mutable->tunnel_hlen);
			create_tunnel_header(vport, mutable, rt, skb->data);
			skb_reset_network_header(skb);

			if (next_skb)
				skb_dst_set(skb, dst_clone(unattached_dst));
			else {
				skb_dst_set(skb, unattached_dst);
				unattached_dst = NULL;
			}
		}
		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph = ip_hdr(skb);
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
		if (unlikely(!skb))
			goto next;

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

			skb->protocol = htons(ETH_P_IP);
			iph = ip_hdr(skb);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));
			ip_send_check(iph);

			if (cache_vport) {
				if (unlikely(compute_ip_summed(skb, true))) {
					kfree_skb(skb);
					goto next;
				}

				OVS_CB(skb)->flow = cache->flow;
				vport_receive(cache_vport, skb);
				sent_len += orig_len;
			} else {
				int xmit_err;

				skb->dev = rt_dst(rt).dev;
				xmit_err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(xmit_err) == 0))
					sent_len += orig_len;
			}
		} else
			sent_len += send_frags(skb, mutable);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		vport_record_error(vport, VPORT_E_TX_DROPPED);

	goto out;

error_free:
	tnl_free_linked_skbs(skb);
error:
	vport_record_error(vport, err);
out:
	dst_release(unattached_dst);
	return sent_len;
}
static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
	[OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
	[OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
};
/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
			  const struct vport *cur_vport,
			  struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;
	struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
	int err;

	if (!options)
		return -EINVAL;

	err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
	if (err)
		return err;

	if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
		return -EINVAL;

	mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;

	if (a[OVS_TUNNEL_ATTR_SRC_IPV4])
		mutable->saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
	mutable->daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);

	if (a[OVS_TUNNEL_ATTR_TOS]) {
		mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
		if (mutable->tos != RT_TOS(mutable->tos))
			return -EINVAL;
	}

	if (a[OVS_TUNNEL_ATTR_TTL])
		mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);

	mutable->tunnel_type = tnl_ops->tunnel_type;
	if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
		mutable->tunnel_type |= TNL_T_KEY_MATCH;
		mutable->flags |= TNL_F_IN_KEY_MATCH;
	} else {
		mutable->tunnel_type |= TNL_T_KEY_EXACT;
		mutable->in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
	}

	if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
		mutable->flags |= TNL_F_OUT_KEY_ACTION;
	else
		mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);

	mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
	if (mutable->tunnel_hlen < 0)
		return mutable->tunnel_hlen;

	mutable->tunnel_hlen += sizeof(struct iphdr);

	old_vport = tnl_find_port(mutable->saddr, mutable->daddr,
				  mutable->in_key, mutable->tunnel_type,
				  &old_mutable);
	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	return 0;
}
struct vport *tnl_create(const struct vport_parms *parms,
			 const struct vport_ops *vport_ops,
			 const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	struct tnl_mutable_config *mutable;
	int initial_frag_id;
	int err;

	vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	vport_gen_rand_ether_addr(mutable->eth_addr);

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
	if (err)
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
				       (net_random() % (MAX_CACHE_EXP / 2));
#endif

	rcu_assign_pointer(tnl_vport->mutable, mutable);

	port_table_add_port(vport);

	return vport;

error_free_mutable:
	kfree(mutable);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}
int tnl_set_options(struct vport *vport, struct nlattr *options)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;
	struct tnl_mutable_config *mutable;
	int err;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	/* Copy fields whose values should be retained. */
	old_mutable = rtnl_dereference(tnl_vport->mutable);
	mutable->seq = old_mutable->seq + 1;
	memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

	/* Parse the others configured by userspace. */
	err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
	if (err)
		goto error_free;

	if (mutable_hash(mutable) != mutable_hash(old_mutable))
		port_table_move_port(vport, mutable);
	else
		assign_config_rcu(vport, mutable);

	return 0;

error_free:
	kfree(mutable);
error:
	return err;
}
int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
	NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->daddr);

	if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
		NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->in_key);
	if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
		NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
	if (mutable->saddr)
		NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->saddr);
	if (mutable->tos)
		NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
	if (mutable->ttl)
		NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu,
						   struct tnl_vport, rcu);

	free_cache((struct tnl_cache __force *)tnl_vport->cache);
	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
	vport_free(tnl_vport_to_vport(tnl_vport));
}
void tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	port_table_remove_port(vport);
	call_rcu(&tnl_vport->rcu, free_port_rcu);
}
int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
			  sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}
const char *tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}
void tnl_free_linked_skbs(struct sk_buff *skb)
{
	while (skb) {
		struct sk_buff *next = skb->next;
		kfree_skb(skb);
		skb = next;
	}
}
int tnl_init(void)
{
	int i;

	port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
			     GFP_KERNEL);
	if (!port_table)
		return -ENOMEM;

	for (i = 0; i < PORT_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&port_table[i]);

	return 0;
}
void tnl_exit(void)
{
	int i;

	for (i = 0; i < PORT_TABLE_SIZE; i++) {
		struct tnl_vport *tnl_vport;
		struct hlist_head *hash_head;
		struct hlist_node *n;

		hash_head = &port_table[i];
		hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {
			/* Any port still in the table at exit was leaked. */
			pr_err("openvswitch: port %s not destroyed\n",
			       tnl_vport->name);
		}
	}

	kfree(port_table);
}