2 * Copyright (c) 2010, 2011 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 #include <linux/if_arp.h>
10 #include <linux/if_ether.h>
12 #include <linux/if_vlan.h>
14 #include <linux/in_route.h>
15 #include <linux/jhash.h>
16 #include <linux/list.h>
17 #include <linux/kernel.h>
18 #include <linux/version.h>
19 #include <linux/workqueue.h>
20 #include <linux/rculist.h>
22 #include <net/dsfield.h>
25 #include <net/inet_ecn.h>
27 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
30 #include <net/route.h>
39 #include "vport-generic.h"
40 #include "vport-internal_dev.h"
42 #ifdef NEED_CACHE_TIMEOUT
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets, while
 * a longer time improves performance by reducing how often the cache needs to
 * be rebuilt.  A variety of factors may cause the cache to be invalidated
 * before the expiration time, but this is the maximum.  The time is expressed
 * in jiffies.
52 #define MAX_CACHE_EXP HZ
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent
 * through the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is run periodically to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval releases
 * resources faster but itself consumes resources by requiring more frequent
 * checks.  A longer interval may result in messages being printed to the
 * kernel message buffer about unreleased resources.  The interval is
 * expressed in jiffies.
69 #define CACHE_CLEANER_INTERVAL (5 * HZ)
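/* Cached header data is stored immediately after struct tnl_cache, padded out
 * to this alignment (see get_cached_header() below). */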
71 #define CACHE_DATA_ALIGN 16
72 #define PORT_TABLE_SIZE 1024
74 static struct hlist_head *port_table __read_mostly;
75 static int port_table_count;
77 static void cache_cleaner(struct work_struct *work);
78 static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
81 * These are just used as an optimization: they don't require any kind of
82 * synchronization because we could have just as easily read the value before
83 * the port change happened.
85 static unsigned int key_local_remote_ports __read_mostly;
86 static unsigned int key_remote_ports __read_mostly;
87 static unsigned int local_remote_ports __read_mostly;
88 static unsigned int remote_ports __read_mostly;
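/* struct rtable stopped wrapping its dst_entry in a union in 2.6.36, so use a
 * small compatibility macro to reach it on either kind of kernel. */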
90 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
91 #define rt_dst(rt) (rt->dst)
93 #define rt_dst(rt) (rt->u.dst)
96 static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
98 return vport_from_priv(tnl_vport);
/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update-side code.
104 static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
106 return rcu_dereference_protected(tnl_vport->cache,
107 lockdep_is_held(&tnl_vport->cache_lock));
110 static inline void schedule_cache_cleaner(void)
112 schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
115 static void free_cache(struct tnl_cache *cache)
120 flow_put(cache->flow);
121 ip_rt_put(cache->rt);
125 static void free_config_rcu(struct rcu_head *rcu)
127 struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
131 static void free_cache_rcu(struct rcu_head *rcu)
133 struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
137 static void assign_config_rcu(struct vport *vport,
138 struct tnl_mutable_config *new_config)
140 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
141 struct tnl_mutable_config *old_config;
143 old_config = rtnl_dereference(tnl_vport->mutable);
144 rcu_assign_pointer(tnl_vport->mutable, new_config);
145 call_rcu(&old_config->rcu, free_config_rcu);
148 static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
150 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
151 struct tnl_cache *old_cache;
153 old_cache = cache_dereference(tnl_vport);
154 rcu_assign_pointer(tnl_vport->cache, new_cache);
157 call_rcu(&old_cache->rcu, free_cache_rcu);
160 static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
162 if (mutable->flags & TNL_F_IN_KEY_MATCH) {
163 if (mutable->key.saddr)
164 return &local_remote_ports;
166 return &remote_ports;
168 if (mutable->key.saddr)
169 return &key_local_remote_ports;
171 return &key_remote_ports;
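/* Hash the whole lookup key with jhash2.  This relies on struct
 * port_lookup_key being a multiple of 32 bits in size with no padding holes,
 * so that every byte of the key contributes to the hash. */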
175 static u32 port_hash(const struct port_lookup_key *key)
	return jhash2((u32 *)key, sizeof(*key) / sizeof(u32), 0);
180 static inline struct hlist_head *find_bucket(u32 hash)
182 return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
185 static void port_table_add_port(struct vport *vport)
187 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
188 const struct tnl_mutable_config *mutable;
191 if (port_table_count == 0)
192 schedule_cache_cleaner();
194 mutable = rtnl_dereference(tnl_vport->mutable);
195 hash = port_hash(&mutable->key);
196 hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
199 (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
202 static void port_table_move_port(struct vport *vport,
203 struct tnl_mutable_config *new_mutable)
205 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
208 hash = port_hash(&new_mutable->key);
209 hlist_del_init_rcu(&tnl_vport->hash_node);
210 hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
212 (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
213 assign_config_rcu(vport, new_mutable);
214 (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
217 static void port_table_remove_port(struct vport *vport)
219 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
221 hlist_del_init_rcu(&tnl_vport->hash_node);
224 if (port_table_count == 0)
225 cancel_delayed_work_sync(&cache_cleaner_wq);
227 (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
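/* RCU hash table lookup: walk the bucket for this key's hash and return the
 * first tunnel port whose lookup key matches byte for byte.  Callers must
 * hold rcu_read_lock() or RTNL. */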
230 static struct tnl_vport *port_table_lookup(struct port_lookup_key *key,
231 const struct tnl_mutable_config **pmutable)
233 struct hlist_node *n;
234 struct hlist_head *bucket;
235 u32 hash = port_hash(key);
	struct tnl_vport *tnl_vport;
238 bucket = find_bucket(hash);
240 hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
241 struct tnl_mutable_config *mutable;
243 mutable = rcu_dereference_rtnl(tnl_vport->mutable);
244 if (!memcmp(&mutable->key, key, sizeof(*key))) {
253 struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
255 const struct tnl_mutable_config **mutable)
257 struct port_lookup_key lookup;
	struct tnl_vport *tnl_vport;
260 lookup.saddr = saddr;
261 lookup.daddr = daddr;
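	/* Try ports with an exact key first (with and then without a bound
	 * local address), then fall back to flow-based key matching.  The
	 * *_ports counters let us skip classes that have no ports at all. */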
263 if (tunnel_type & TNL_T_KEY_EXACT) {
265 lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;
267 if (key_local_remote_ports) {
268 tnl_vport = port_table_lookup(&lookup, mutable);
270 return tnl_vport_to_vport(tnl_vport);
273 if (key_remote_ports) {
275 tnl_vport = port_table_lookup(&lookup, mutable);
277 return tnl_vport_to_vport(tnl_vport);
279 lookup.saddr = saddr;
283 if (tunnel_type & TNL_T_KEY_MATCH) {
285 lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;
287 if (local_remote_ports) {
288 tnl_vport = port_table_lookup(&lookup, mutable);
290 return tnl_vport_to_vport(tnl_vport);
295 tnl_vport = port_table_lookup(&lookup, mutable);
297 return tnl_vport_to_vport(tnl_vport);
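/* If the outer IP header arrived marked Congestion Experienced, copy the CE
 * mark into the encapsulated IPv4/IPv6 header so that ECN information
 * survives decapsulation. */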
304 static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
306 if (unlikely(INET_ECN_is_ce(tos))) {
307 __be16 protocol = skb->protocol;
309 skb_set_network_header(skb, ETH_HLEN);
311 if (protocol == htons(ETH_P_8021Q)) {
312 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
315 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
316 skb_set_network_header(skb, VLAN_ETH_HLEN);
319 if (protocol == htons(ETH_P_IP)) {
320 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
321 + sizeof(struct iphdr))))
324 IP_ECN_set_ce(ip_hdr(skb));
326 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
327 else if (protocol == htons(ETH_P_IPV6)) {
328 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
329 + sizeof(struct ipv6hdr))))
332 IP6_ECN_set_ce(ipv6_hdr(skb));
339 * tnl_rcv - ingress point for generic tunnel code
341 * @vport: port this packet was received on
342 * @skb: received packet
343 * @tos: ToS from encapsulating IP packet, used to copy ECN bits
345 * Must be called with rcu_read_lock.
347 * Packets received by this function are in the following state:
348 * - skb->data points to the inner Ethernet header.
349 * - The inner Ethernet header is in the linear data area.
350 * - skb->csum does not include the inner Ethernet header.
351 * - The layer pointers are undefined.
353 void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
357 skb_reset_mac_header(skb);
360 if (likely(ntohs(eh->h_proto) >= 1536))
361 skb->protocol = eh->h_proto;
363 skb->protocol = htons(ETH_P_802_2);
367 skb_clear_rxhash(skb);
370 ecn_decapsulate(skb, tos);
371 vlan_set_tci(skb, 0);
373 if (unlikely(compute_ip_summed(skb, false))) {
378 vport_receive(vport, skb);
381 static bool check_ipv4_address(__be32 addr)
383 if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
384 || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
390 static bool ipv4_should_icmp(struct sk_buff *skb)
392 struct iphdr *old_iph = ip_hdr(skb);
	/* Don't respond to L2 multicast or broadcast. */
395 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
398 /* Don't respond to L3 broadcast or invalid addresses. */
399 if (!check_ipv4_address(old_iph->daddr) ||
400 !check_ipv4_address(old_iph->saddr))
403 /* Only respond to the first fragment. */
404 if (old_iph->frag_off & htons(IP_OFFSET))
407 /* Don't respond to ICMP error messages. */
408 if (old_iph->protocol == IPPROTO_ICMP) {
409 u8 icmp_type, *icmp_typep;
411 icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
412 (old_iph->ihl << 2) +
413 offsetof(struct icmphdr, type) -
414 skb->data, sizeof(icmp_type),
420 if (*icmp_typep > NR_ICMP_TYPES
421 || (*icmp_typep <= ICMP_PARAMETERPROB
422 && *icmp_typep != ICMP_ECHOREPLY
423 && *icmp_typep != ICMP_ECHO))
430 static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
431 unsigned int mtu, unsigned int payload_length)
433 struct iphdr *iph, *old_iph = ip_hdr(skb);
434 struct icmphdr *icmph;
437 iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
438 icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
439 payload = skb_put(nskb, payload_length);
443 iph->ihl = sizeof(struct iphdr) >> 2;
444 iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
445 IPTOS_PREC_INTERNETCONTROL;
446 iph->tot_len = htons(sizeof(struct iphdr)
447 + sizeof(struct icmphdr)
449 get_random_bytes(&iph->id, sizeof(iph->id));
452 iph->protocol = IPPROTO_ICMP;
453 iph->daddr = old_iph->saddr;
454 iph->saddr = old_iph->daddr;
459 icmph->type = ICMP_DEST_UNREACH;
460 icmph->code = ICMP_FRAG_NEEDED;
461 icmph->un.gateway = htonl(mtu);
464 nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
465 nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
466 payload, payload_length,
468 icmph->checksum = csum_fold(nskb->csum);
471 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
472 static bool ipv6_should_icmp(struct sk_buff *skb)
474 struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
476 int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
477 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
479 /* Check source address is valid. */
480 addr_type = ipv6_addr_type(&old_ipv6h->saddr);
481 if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
484 /* Don't reply to unspecified addresses. */
485 if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
488 /* Don't respond to ICMP error messages. */
489 payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
493 if (nexthdr == NEXTHDR_ICMP) {
494 u8 icmp_type, *icmp_typep;
496 icmp_typep = skb_header_pointer(skb, payload_off +
497 offsetof(struct icmp6hdr,
499 sizeof(icmp_type), &icmp_type);
501 if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
508 static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
509 unsigned int mtu, unsigned int payload_length)
511 struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
512 struct icmp6hdr *icmp6h;
515 ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
516 icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
517 payload = skb_put(nskb, payload_length);
522 memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
523 ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
525 ipv6h->nexthdr = NEXTHDR_ICMP;
526 ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
527 ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
528 ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);
531 icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
532 icmp6h->icmp6_code = 0;
533 icmp6h->icmp6_cksum = 0;
534 icmp6h->icmp6_mtu = htonl(mtu);
536 nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
537 nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
538 payload, payload_length,
540 icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
541 sizeof(struct icmp6hdr)
543 ipv6h->nexthdr, nskb->csum);
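/* Synthesize an ICMP "fragmentation needed" (IPv4) or ICMPv6 "packet too big"
 * reply to the original sender and inject it into the datapath as if it had
 * been received on this tunnel port, so PMTUD can work across the tunnel. */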
547 bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
548 struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
550 unsigned int eth_hdr_len = ETH_HLEN;
551 unsigned int total_length = 0, header_length = 0, payload_length;
552 struct ethhdr *eh, *old_eh = eth_hdr(skb);
553 struct sk_buff *nskb;
556 if (skb->protocol == htons(ETH_P_IP)) {
557 if (mtu < IP_MIN_MTU)
560 if (!ipv4_should_icmp(skb))
563 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
564 else if (skb->protocol == htons(ETH_P_IPV6)) {
565 if (mtu < IPV6_MIN_MTU)
		 * In theory we should do PMTUD on IPv6 multicast messages, but
		 * we don't have an address to send from, so just fragment.
572 if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
575 if (!ipv6_should_icmp(skb))
583 if (old_eh->h_proto == htons(ETH_P_8021Q))
584 eth_hdr_len = VLAN_ETH_HLEN;
586 payload_length = skb->len - eth_hdr_len;
587 if (skb->protocol == htons(ETH_P_IP)) {
588 header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
589 total_length = min_t(unsigned int, header_length +
590 payload_length, 576);
592 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
594 header_length = sizeof(struct ipv6hdr) +
595 sizeof(struct icmp6hdr);
596 total_length = min_t(unsigned int, header_length +
597 payload_length, IPV6_MIN_MTU);
601 payload_length = total_length - header_length;
603 nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
608 skb_reserve(nskb, NET_IP_ALIGN);
610 /* Ethernet / VLAN */
611 eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
612 memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
613 memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
614 nskb->protocol = eh->h_proto = old_eh->h_proto;
615 if (old_eh->h_proto == htons(ETH_P_8021Q)) {
616 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
618 vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
619 vh->h_vlan_encapsulated_proto = skb->protocol;
621 vlan_set_tci(nskb, vlan_get_tci(skb));
622 skb_reset_mac_header(nskb);
625 if (skb->protocol == htons(ETH_P_IP))
626 ipv4_build_icmp(skb, nskb, mtu, payload_length);
627 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
629 ipv6_build_icmp(skb, nskb, mtu, payload_length);
	 * Assume that flow-based keys are symmetric with respect to input
	 * and output, and use the key that we were going to put on the
	 * outgoing packet for the fake received packet.  If the keys are
	 * not symmetric then PMTUD needs to be disabled, since we won't
	 * have any way of synthesizing packets.
639 if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
640 (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
641 OVS_CB(nskb)->tun_id = flow_key;
643 if (unlikely(compute_ip_summed(nskb, false))) {
648 vport_receive(vport, nskb);
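/* Work out how much room the encapsulated frame really has: the route's MTU
 * minus the tunnel header and any VLAN tag.  Decide the DF bit for the outer
 * header and, when PMTUD is enabled and the packet does not fit, send a
 * "fragmentation needed" reply back via tnl_frag_needed(). */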
653 static bool check_mtu(struct sk_buff *skb,
655 const struct tnl_mutable_config *mutable,
656 const struct rtable *rt, __be16 *frag_offp)
658 bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
659 bool pmtud = mutable->flags & TNL_F_PMTUD;
660 __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
662 unsigned int packet_length = skb->len - ETH_HLEN;
664 /* Allow for one level of tagging in the packet length. */
665 if (!vlan_tx_tag_present(skb) &&
666 eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
667 packet_length -= VLAN_HLEN;
		/* The tag needs to go in the packet regardless of where it
		 * currently is, so subtract it from the MTU.
675 if (vlan_tx_tag_present(skb) ||
676 eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
677 vlan_header = VLAN_HLEN;
679 mtu = dst_mtu(&rt_dst(rt))
681 - mutable->tunnel_hlen
685 if (skb->protocol == htons(ETH_P_IP)) {
686 struct iphdr *iph = ip_hdr(skb);
689 frag_off = iph->frag_off & htons(IP_DF);
691 if (pmtud && iph->frag_off & htons(IP_DF)) {
692 mtu = max(mtu, IP_MIN_MTU);
694 if (packet_length > mtu &&
695 tnl_frag_needed(vport, mutable, skb, mtu,
696 OVS_CB(skb)->tun_id))
700 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
701 else if (skb->protocol == htons(ETH_P_IPV6)) {
702 /* IPv6 requires end hosts to do fragmentation
703 * if the packet is above the minimum MTU.
705 if (df_inherit && packet_length > IPV6_MIN_MTU)
706 frag_off = htons(IP_DF);
709 mtu = max(mtu, IPV6_MIN_MTU);
711 if (packet_length > mtu &&
712 tnl_frag_needed(vport, mutable, skb, mtu,
713 OVS_CB(skb)->tun_id))
719 *frag_offp = frag_off;
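/* Write the outer IPv4 header into 'header', then let the protocol-specific
 * tnl_ops fill in its own encapsulation header directly after it. */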
723 static void create_tunnel_header(const struct vport *vport,
724 const struct tnl_mutable_config *mutable,
725 const struct rtable *rt, void *header)
727 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
728 struct iphdr *iph = header;
731 iph->ihl = sizeof(struct iphdr) >> 2;
732 iph->frag_off = htons(IP_DF);
733 iph->protocol = tnl_vport->tnl_ops->ipproto;
734 iph->tos = mutable->tos;
735 iph->daddr = rt->rt_dst;
736 iph->saddr = rt->rt_src;
737 iph->ttl = mutable->ttl;
739 iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
741 tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
744 static inline void *get_cached_header(const struct tnl_cache *cache)
746 return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
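/* A cached header may be used only while everything it was derived from is
 * unchanged: it has not expired (on kernels that need a timeout), the IPv4
 * routing generation and hard-header sequence still match, the mutable
 * config sequence number is the same, and any flow cached for an internal
 * device is still alive. */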
749 static inline bool check_cache_valid(const struct tnl_cache *cache,
750 const struct tnl_mutable_config *mutable)
753 #ifdef NEED_CACHE_TIMEOUT
754 time_before(jiffies, cache->expiration) &&
757 atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
760 rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
762 mutable->seq == cache->mutable_seq &&
763 (!is_internal_dev(rt_dst(cache->rt).dev) ||
764 (cache->flow && !cache->flow->dead));
767 static void __cache_cleaner(struct tnl_vport *tnl_vport)
769 const struct tnl_mutable_config *mutable =
770 rcu_dereference(tnl_vport->mutable);
771 const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
773 if (cache && !check_cache_valid(cache, mutable) &&
774 spin_trylock_bh(&tnl_vport->cache_lock)) {
775 assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
776 spin_unlock_bh(&tnl_vport->cache_lock);
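/* Periodic worker: reschedule itself, then walk every bucket and drop caches
 * that have gone stale.  __cache_cleaner() uses spin_trylock_bh() so it never
 * blocks the transmit path. */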
780 static void cache_cleaner(struct work_struct *work)
784 schedule_cache_cleaner();
787 for (i = 0; i < PORT_TABLE_SIZE; i++) {
788 struct hlist_node *n;
789 struct hlist_head *bucket;
790 struct tnl_vport *tnl_vport;
792 bucket = &port_table[i];
793 hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
794 __cache_cleaner(tnl_vport);
799 static inline void create_eth_hdr(struct tnl_cache *cache,
800 const struct rtable *rt)
802 void *cache_data = get_cached_header(cache);
803 int hh_len = rt_dst(rt).hh->hh_len;
804 int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;
810 hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
811 memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
812 } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));
814 cache->hh_seq = hh_seq;
816 read_lock_bh(&rt_dst(rt).hh->hh_lock);
817 memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
818 read_unlock_bh(&rt_dst(rt).hh->hh_lock);
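/* Build a fully precomputed header (L2 hard header + outer IP + tunnel
 * header) for this route.  If the route egresses through an OVS internal
 * device, also look up and hold the matching flow so packets can later be
 * handed straight to that datapath. */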
822 static struct tnl_cache *build_cache(struct vport *vport,
823 const struct tnl_mutable_config *mutable,
826 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
827 struct tnl_cache *cache;
831 if (!(mutable->flags & TNL_F_HDR_CACHE))
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching, just fall back to the IP stack.
	 * If the lock is contended, fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
845 if (!spin_trylock_bh(&tnl_vport->cache_lock))
848 cache = cache_dereference(tnl_vport);
849 if (check_cache_valid(cache, mutable))
854 cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;
856 cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
857 cache_len, GFP_ATOMIC);
861 cache->len = cache_len;
863 create_eth_hdr(cache, rt);
864 cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;
866 create_tunnel_header(vport, mutable, rt, cache_data);
868 cache->mutable_seq = mutable->seq;
870 #ifdef NEED_CACHE_TIMEOUT
871 cache->expiration = jiffies + tnl_vport->cache_exp_interval;
874 if (is_internal_dev(rt_dst(rt).dev)) {
875 struct sw_flow_key flow_key;
876 struct vport *dst_vport;
881 struct sw_flow *flow;
883 dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
887 skb = alloc_skb(cache->len, GFP_ATOMIC);
891 __skb_put(skb, cache->len);
892 memcpy(skb->data, get_cached_header(cache), cache->len);
894 err = flow_extract(skb, dst_vport->port_no, &flow_key,
895 &flow_key_len, &is_frag);
901 flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
902 &flow_key, flow_key_len);
910 assign_cache_rcu(vport, cache);
913 spin_unlock_bh(&tnl_vport->cache_lock);
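/* Return the cached route when the header cache is still valid and the ToS
 * matches the configured one; otherwise do a fresh ip_route_output_key()
 * (flowi or flowi4 depending on kernel version) and try to rebuild the cache
 * for the new route. */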
918 static struct rtable *find_route(struct vport *vport,
919 const struct tnl_mutable_config *mutable,
920 u8 tos, struct tnl_cache **cache)
922 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
923 struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
928 if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
930 return cur_cache->rt;
933 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
934 struct flowi fl = { .nl_u = { .ip4_u =
935 { .daddr = mutable->key.daddr,
936 .saddr = mutable->key.saddr,
938 .proto = tnl_vport->tnl_ops->ipproto };
940 if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
943 struct flowi4 fl = { .daddr = mutable->key.daddr,
944 .saddr = mutable->key.saddr,
946 .flowi4_proto = tnl_vport->tnl_ops->ipproto };
948 rt = ip_route_output_key(&init_net, &fl);
953 if (likely(tos == mutable->tos))
954 *cache = build_cache(vport, mutable, rt);
960 static inline bool need_linearize(const struct sk_buff *skb)
964 if (unlikely(skb_shinfo(skb)->frag_list))
968 * Generally speaking we should linearize if there are paged frags.
969 * However, if all of the refcounts are 1 we know nobody else can
970 * change them from underneath us and we can skip the linearization.
972 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
973 if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
979 static struct sk_buff *handle_offloads(struct sk_buff *skb,
980 const struct tnl_mutable_config *mutable,
981 const struct rtable *rt)
986 min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
987 + mutable->tunnel_hlen
988 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
990 if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
991 int head_delta = SKB_DATA_ALIGN(min_headroom -
994 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
1000 forward_ip_summed(skb, true);
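	/* Segment GSO packets in software; for packets with a pending partial
	 * checksum, complete the checksum here (linearizing first if shared
	 * pages could change underneath us). */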
1002 if (skb_is_gso(skb)) {
1003 struct sk_buff *nskb;
1005 nskb = skb_gso_segment(skb, 0);
1008 err = PTR_ERR(nskb);
1014 } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem.
		 */
1020 if (unlikely(need_linearize(skb))) {
1021 err = __skb_linearize(skb);
1026 err = skb_checksum_help(skb);
1031 set_ip_summed(skb, OVS_CSUM_NONE);
1038 return ERR_PTR(err);
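/* Transmit a chain of already-built fragments with ip_local_out(), summing
 * the payload bytes actually sent.  Once one fragment is dropped, the rest of
 * the chain is freed rather than sent. */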
1041 static int send_frags(struct sk_buff *skb,
1042 const struct tnl_mutable_config *mutable)
1048 struct sk_buff *next = skb->next;
1049 int frag_len = skb->len - mutable->tunnel_hlen;
1053 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
1055 err = ip_local_out(skb);
1057 if (unlikely(net_xmit_eval(err)))
1059 sent_len += frag_len;
	 * There's no point in continuing to send fragments once one has been
	 * dropped, so just free the rest.  This may help relieve the congestion
	 * that caused the first packet to be dropped.
1070 tnl_free_linked_skbs(skb);
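/* Egress path for all tunnel vports: validate the inner headers, choose the
 * outer ToS and TTL, find a route (using the header cache when possible),
 * fix up offloads, prepend either the cached header or a freshly built one,
 * and finally hand each packet to an internal device, dev_queue_xmit(), or
 * the IP stack for fragmentation. */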
1074 int tnl_send(struct vport *vport, struct sk_buff *skb)
1076 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1077 const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
1079 enum vport_err_type err = VPORT_E_TX_ERROR;
1081 struct dst_entry *unattached_dst = NULL;
1082 struct tnl_cache *cache;
1084 __be16 frag_off = 0;
1089 /* Validate the protocol headers before we try to use them. */
1090 if (skb->protocol == htons(ETH_P_8021Q) &&
1091 !vlan_tx_tag_present(skb)) {
1092 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1095 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1096 skb_set_network_header(skb, VLAN_ETH_HLEN);
1099 if (skb->protocol == htons(ETH_P_IP)) {
1100 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1101 + sizeof(struct iphdr))))
1104 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1105 else if (skb->protocol == htons(ETH_P_IPV6)) {
1106 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1107 + sizeof(struct ipv6hdr))))
1113 if (skb->protocol == htons(ETH_P_IP))
1114 inner_tos = ip_hdr(skb)->tos;
1115 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1116 else if (skb->protocol == htons(ETH_P_IPV6))
1117 inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
1122 if (mutable->flags & TNL_F_TOS_INHERIT)
1127 tos = INET_ECN_encapsulate(tos, inner_tos);
1130 rt = find_route(vport, mutable, tos, &cache);
1133 if (unlikely(!cache))
1134 unattached_dst = &rt_dst(rt);
1140 skb_clear_rxhash(skb);
1143 skb = handle_offloads(skb, mutable, rt);
1148 if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
1149 err = VPORT_E_TX_DROPPED;
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
1157 if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1159 unattached_dst = &rt_dst(rt);
1160 dst_hold(unattached_dst);
1167 ttl = ip4_dst_hoplimit(&rt_dst(rt));
1169 if (mutable->flags & TNL_F_TTL_INHERIT) {
1170 if (skb->protocol == htons(ETH_P_IP))
1171 ttl = ip_hdr(skb)->ttl;
1172 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1173 else if (skb->protocol == htons(ETH_P_IPV6))
1174 ttl = ipv6_hdr(skb)->hop_limit;
1180 struct sk_buff *next_skb = skb->next;
1183 if (unlikely(vlan_deaccel_tag(skb)))
1186 if (likely(cache)) {
1187 skb_push(skb, cache->len);
1188 memcpy(skb->data, get_cached_header(cache), cache->len);
1189 skb_reset_mac_header(skb);
1190 skb_set_network_header(skb, rt_dst(rt).hh->hh_len);
1193 skb_push(skb, mutable->tunnel_hlen);
1194 create_tunnel_header(vport, mutable, rt, skb->data);
1195 skb_reset_network_header(skb);
1198 skb_dst_set(skb, dst_clone(unattached_dst));
1200 skb_dst_set(skb, unattached_dst);
1201 unattached_dst = NULL;
1204 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1209 iph->frag_off = frag_off;
1210 ip_select_ident(iph, &rt_dst(rt), NULL);
1212 skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
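		/* Fast path: when the cached header was used and the route
		 * leaves through an OVS internal device, hand the packet and
		 * the cached flow straight to that datapath instead of going
		 * through the IP stack. */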
1216 if (likely(cache)) {
1217 int orig_len = skb->len - cache->len;
1218 struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
1220 skb->protocol = htons(ETH_P_IP);
1222 iph->tot_len = htons(skb->len - skb_network_offset(skb));
1226 if (unlikely(compute_ip_summed(skb, true))) {
1231 OVS_CB(skb)->flow = cache->flow;
1232 vport_receive(cache_vport, skb);
1233 sent_len += orig_len;
1237 skb->dev = rt_dst(rt).dev;
1238 xmit_err = dev_queue_xmit(skb);
1240 if (likely(net_xmit_eval(xmit_err) == 0))
1241 sent_len += orig_len;
1244 sent_len += send_frags(skb, mutable);
1250 if (unlikely(sent_len == 0))
1251 vport_record_error(vport, VPORT_E_TX_DROPPED);
1256 tnl_free_linked_skbs(skb);
1258 vport_record_error(vport, err);
1260 dst_release(unattached_dst);
1264 static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
1265 [OVS_TUNNEL_ATTR_FLAGS] = { .type = NLA_U32 },
1266 [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
1267 [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
1268 [OVS_TUNNEL_ATTR_OUT_KEY] = { .type = NLA_U64 },
1269 [OVS_TUNNEL_ATTR_IN_KEY] = { .type = NLA_U64 },
1270 [OVS_TUNNEL_ATTR_TOS] = { .type = NLA_U8 },
1271 [OVS_TUNNEL_ATTR_TTL] = { .type = NLA_U8 },
1274 /* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
1275 static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
1276 const struct vport *cur_vport,
1277 struct tnl_mutable_config *mutable)
1279 const struct vport *old_vport;
1280 const struct tnl_mutable_config *old_mutable;
1281 struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
1287 err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
1291 if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
1294 mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
1296 if (a[OVS_TUNNEL_ATTR_SRC_IPV4])
1297 mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
1298 mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
1300 if (a[OVS_TUNNEL_ATTR_TOS]) {
1301 mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
1302 if (mutable->tos != RT_TOS(mutable->tos))
1306 if (a[OVS_TUNNEL_ATTR_TTL])
1307 mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);
1309 mutable->key.tunnel_type = tnl_ops->tunnel_type;
1310 if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
1311 mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
1312 mutable->flags |= TNL_F_IN_KEY_MATCH;
1314 mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
1315 mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
1318 if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
1319 mutable->flags |= TNL_F_OUT_KEY_ACTION;
1321 mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);
1323 mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
1324 if (mutable->tunnel_hlen < 0)
1325 return mutable->tunnel_hlen;
1327 mutable->tunnel_hlen += sizeof(struct iphdr);
1329 old_vport = tnl_find_port(mutable->key.saddr, mutable->key.daddr,
1330 mutable->key.in_key, mutable->key.tunnel_type,
1333 if (old_vport && old_vport != cur_vport)
1339 struct vport *tnl_create(const struct vport_parms *parms,
1340 const struct vport_ops *vport_ops,
1341 const struct tnl_ops *tnl_ops)
1343 struct vport *vport;
1344 struct tnl_vport *tnl_vport;
1345 struct tnl_mutable_config *mutable;
1346 int initial_frag_id;
1349 vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
1350 if (IS_ERR(vport)) {
1351 err = PTR_ERR(vport);
1355 tnl_vport = tnl_vport_priv(vport);
1357 strcpy(tnl_vport->name, parms->name);
1358 tnl_vport->tnl_ops = tnl_ops;
1360 mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1363 goto error_free_vport;
1366 vport_gen_rand_ether_addr(mutable->eth_addr);
1368 get_random_bytes(&initial_frag_id, sizeof(int));
1369 atomic_set(&tnl_vport->frag_id, initial_frag_id);
1371 err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
1373 goto error_free_mutable;
1375 spin_lock_init(&tnl_vport->cache_lock);
1377 #ifdef NEED_CACHE_TIMEOUT
1378 tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1379 (net_random() % (MAX_CACHE_EXP / 2));
1382 rcu_assign_pointer(tnl_vport->mutable, mutable);
1384 port_table_add_port(vport);
1392 return ERR_PTR(err);
1395 int tnl_set_options(struct vport *vport, struct nlattr *options)
1397 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1398 const struct tnl_mutable_config *old_mutable;
1399 struct tnl_mutable_config *mutable;
1402 mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1408 /* Copy fields whose values should be retained. */
1409 old_mutable = rtnl_dereference(tnl_vport->mutable);
1410 mutable->seq = old_mutable->seq + 1;
1411 memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);
1413 /* Parse the others configured by userspace. */
1414 err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
1418 if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
1419 port_table_move_port(vport, mutable);
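/* Dump the current configuration back to userspace as OVS_TUNNEL_ATTR_*
 * netlink attributes; optional attributes are emitted only when set.  The
 * NLA_PUT_* macros abort the dump if the skb runs out of room. */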
1429 int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
1431 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1432 const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
1434 NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
1435 NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr);
1437 if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
1438 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key);
1439 if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
1440 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
1441 if (mutable->key.saddr)
1442 NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr);
1444 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
1446 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);
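/* Destruction is deferred through RCU: tnl_destroy() unhooks the port from
 * the table, then free_port_rcu() releases the header cache, the mutable
 * config, and the vport itself after a grace period. */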
1454 static void free_port_rcu(struct rcu_head *rcu)
1456 struct tnl_vport *tnl_vport = container_of(rcu,
1457 struct tnl_vport, rcu);
1459 free_cache((struct tnl_cache __force *)tnl_vport->cache);
	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
1461 vport_free(tnl_vport_to_vport(tnl_vport));
1464 void tnl_destroy(struct vport *vport)
1466 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1467 const struct tnl_mutable_config *mutable;
1469 mutable = rtnl_dereference(tnl_vport->mutable);
1470 port_table_remove_port(vport);
1471 call_rcu(&tnl_vport->rcu, free_port_rcu);
1474 int tnl_set_addr(struct vport *vport, const unsigned char *addr)
1476 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1477 struct tnl_mutable_config *mutable;
1479 mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
1480 sizeof(struct tnl_mutable_config), GFP_KERNEL);
1484 memcpy(mutable->eth_addr, addr, ETH_ALEN);
1485 assign_config_rcu(vport, mutable);
1490 const char *tnl_get_name(const struct vport *vport)
1492 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1493 return tnl_vport->name;
1496 const unsigned char *tnl_get_addr(const struct vport *vport)
1498 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1499 return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
1502 void tnl_free_linked_skbs(struct sk_buff *skb)
1505 struct sk_buff *next = skb->next;
	port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
1520 for (i = 0; i < PORT_TABLE_SIZE; i++)
1521 INIT_HLIST_HEAD(&port_table[i]);
1530 for (i = 0; i < PORT_TABLE_SIZE; i++) {
		struct tnl_vport *tnl_vport;
1532 struct hlist_head *hash_head;
1533 struct hlist_node *n;
1535 hash_head = &port_table[i];
1536 hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {