 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/inet_ecn.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/route.h>

#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
 * On kernels where we can't quickly detect changes in the rest of the system,
 * we use an expiration time to invalidate the cache. A shorter expiration
 * reduces the length of time that we may potentially blackhole packets, while
 * a longer time increases performance by reducing the frequency with which the
 * cache needs to be rebuilt. A variety of factors may cause the cache to be
 * invalidated before the expiration time, but this is the maximum. The time
 * is expressed in jiffies.

#define MAX_CACHE_EXP HZ
 * Interval to check for and remove caches that are no longer valid. Caches
 * are checked for validity before they are used for packet encapsulation, and
 * old caches are removed at that time. However, if no packets are sent through
 * the tunnel, the cache will never be destroyed. Since it holds
 * references to a number of system objects, the cache will continue to consume
 * system resources by preventing those objects from being destroyed. The cache
 * cleaner is run periodically to free invalid caches; it does not
 * significantly affect system performance. A shorter interval releases
 * resources faster but itself consumes resources by requiring more frequent
 * checks. A longer interval may result in messages being printed to the kernel
 * message buffer about unreleased resources. The interval is expressed in
 * jiffies.

#define CACHE_CLEANER_INTERVAL (5 * HZ)
#define CACHE_DATA_ALIGN 16

static struct tbl __rcu *port_table __read_mostly;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
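/* struct rtable embedded its dst_entry as "u.dst" before 2.6.36 and as "dst"
 * afterwards; rt_dst() hides the difference. */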
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif
static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
	return vport_from_priv(tnl_vport);

static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
	return container_of(node, struct tnl_vport, tbl_node);

/* This is analogous to rtnl_dereference for the tunnel cache. It checks that
 * cache_lock is held, so it is only for update-side code.
 */
static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
	return rcu_dereference_protected(tnl_vport->cache,
					 lockdep_is_held(&tnl_vport->cache_lock));

static inline void schedule_cache_cleaner(void)
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
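/* Releases the flow and route references held by a header cache entry. */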
static void free_cache(struct tnl_cache *cache)
	flow_put(cache->flow);
	ip_rt_put(cache->rt);

static void free_config_rcu(struct rcu_head *rcu)
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);

static void free_cache_rcu(struct rcu_head *rcu)
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);

static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config_rcu);

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = cache_dereference(tnl_vport);
	rcu_assign_pointer(tnl_vport->cache, new_cache);

	if (old_cache)
		call_rcu(&old_cache->rcu, free_cache_rcu);
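/* Returns the global counter tracking how many ports fall into this port's
 * lookup class (exact-key vs. flow-matched, with or without a configured
 * local address).  tnl_find_port() consults these counters so it can skip
 * hash lookups for classes that currently contain no ports. */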
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
	if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->port_config.saddr)
			return &local_remote_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->port_config.saddr)
			return &key_local_remote_ports;
		else
			return &key_remote_ports;
struct port_lookup_key {
	const struct tnl_mutable_config *mutable;

/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target)
	const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
	struct port_lookup_key *lookup = target;

	lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
		lookup->mutable->port_config.daddr == lookup->daddr &&
		lookup->mutable->port_config.in_key == lookup->key &&
		lookup->mutable->port_config.saddr == lookup->saddr);
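/* Hashes the lookup tuple (saddr, daddr, tunnel type, 64-bit key) with jhash. */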
static u32 port_hash(struct port_lookup_key *k)
	u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
			     k->tunnel_type, 0);
	return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);

static u32 mutable_hash(const struct tnl_mutable_config *mutable)
	struct port_lookup_key lookup;

	lookup.saddr = mutable->port_config.saddr;
	lookup.daddr = mutable->port_config.daddr;
	lookup.key = mutable->port_config.in_key;
	lookup.tunnel_type = mutable->tunnel_type;

	return port_hash(&lookup);
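/* If the port table has become empty, stop the cache cleaner and schedule the
 * old table for deferred destruction. */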
static void check_table_empty(void)
	struct tbl *old_table = rtnl_dereference(port_table);

	if (tbl_count(old_table) == 0) {
		cancel_delayed_work_sync(&cache_cleaner_wq);
		rcu_assign_pointer(port_table, NULL);
		tbl_deferred_destroy(old_table, NULL);
static int add_port(struct vport *vport)
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	if (!cur_table) {
		struct tbl *new_table;

		new_table = tbl_create(0);

		rcu_assign_pointer(port_table, new_table);
		schedule_cache_cleaner();

	} else if (tbl_count(cur_table) > tbl_n_buckets(cur_table)) {
		struct tbl *new_table;

		new_table = tbl_expand(cur_table);
		if (IS_ERR(new_table))
			return PTR_ERR(new_table);

		rcu_assign_pointer(port_table, new_table);
		tbl_deferred_destroy(cur_table, NULL);

	err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
			 mutable_hash(rtnl_dereference(tnl_vport->mutable)));

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	hash = mutable_hash(new_mutable);
	if (hash == tnl_vport->tbl_node.hash)

	 * Ideally we should make this move atomic to avoid having gaps in
	 * finding tunnels or the possibility of failure. However, if we do
	 * find a tunnel it will always be consistent.
	err = tbl_remove(cur_table, &tnl_vport->tbl_node);

	err = tbl_insert(cur_table, &tnl_vport->tbl_node, hash);
		(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
static int del_port(struct vport *vport)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	err = tbl_remove(rtnl_dereference(port_table), &tnl_vport->tbl_node);

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
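/* Finds the tunnel port that should receive a packet with the given outer
 * addresses and key.  Exact-key ports are tried before flow-matched ("key
 * match") ports, and ports bound to a specific local address before wildcard
 * ones; the global port counters let us skip hash lookups for classes that
 * have no configured ports. */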
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
			    int tunnel_type,
			    const struct tnl_mutable_config **mutable)
	struct port_lookup_key lookup;
	struct tbl *table = rcu_dereference_rtnl(port_table);
	struct tbl_node *tbl_node;

	if (unlikely(!table))

	lookup.saddr = saddr;
	lookup.daddr = daddr;

	if (tunnel_type & TNL_T_KEY_EXACT) {
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

		if (key_local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);

		if (key_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);

			lookup.saddr = saddr;

	if (tunnel_type & TNL_T_KEY_MATCH) {
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

		if (local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);

	*mutable = lookup.mutable;
	return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
static inline void ecn_decapsulate(struct sk_buff *skb)
	/* This is accessing the outer IP header of the tunnel, which we've
	 * already validated to be OK. skb->data is currently set to the start
	 * of the inner Ethernet header, and we've already validated that the
	 * first ETH_HLEN bytes are present.
	 */
	if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (skb->protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))

			IP_ECN_set_ce(ip_hdr(skb));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))

			IP6_ECN_set_ce(ipv6_hdr(skb));
/* Called with rcu_read_lock. */
void tnl_rcv(struct vport *vport, struct sk_buff *skb)
	/* Packets received by this function are in the following state:
	 * - skb->data points to the inner Ethernet header.
	 * - The inner Ethernet header is in the linear data area.
	 * - skb->csum does not include the inner Ethernet header.
	 * - The layer pointers point at the outer headers.
	 */
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	ecn_decapsulate(skb);
	compute_ip_summed(skb, false);

	vport_receive(vport, skb);
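/* Returns false for IPv4 addresses (multicast, limited broadcast, loopback,
 * zero network) to which we should never address a synthesized ICMP error. */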
static bool check_ipv4_address(__be32 addr)
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))

static bool ipv4_should_icmp(struct sk_buff *skb)
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
				&& *icmp_typep != ICMP_ECHOREPLY
				&& *icmp_typep != ICMP_ECHO))
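/* Builds the IPv4 and ICMP "fragmentation needed" headers in 'nskb' and
 * copies (and checksums) 'payload_length' bytes of the original packet in
 * 'skb' as the ICMP payload. */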
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	iph->ihl		=	sizeof(struct iphdr) >> 2;
	iph->tos		=	(old_iph->tos & IPTOS_TOS_MASK) |
					IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len		=	htons(sizeof(struct iphdr)
					      + sizeof(struct icmphdr)
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->protocol		=	IPPROTO_ICMP;
	iph->daddr		=	old_iph->saddr;
	iph->saddr		=	old_iph->daddr;

	icmph->type		=	ICMP_DEST_UNREACH;
	icmph->code		=	ICMP_FRAG_NEEDED;
	icmph->un.gateway	=	htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
	icmph->checksum = csum_fold(nskb->csum);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check that the source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len		=	htons(sizeof(struct icmp6hdr)
	ipv6h->nexthdr			=	NEXTHDR_ICMP;
	ipv6h->hop_limit		=	IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	icmp6h->icmp6_type		=	ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code		=	0;
	icmp6h->icmp6_cksum		=	0;
	icmp6h->icmp6_mtu		=	htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
						sizeof(struct icmp6hdr)
						ipv6h->nexthdr, nskb->csum);
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)

		if (!ipv4_should_icmp(skb))
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)

		 * In theory we should do PMTUD on IPv6 multicast messages but
		 * we don't have an address to send from, so just fragment.
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)

		if (!ipv6_should_icmp(skb))

	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
				     payload_length, 576);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
				     payload_length, IPV6_MIN_MTU);

	total_length = min(total_length, mutable->mtu);
	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	skb_reset_mac_header(nskb);

	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		ipv6_build_icmp(skb, nskb, mtu, payload_length);

	 * Assume that flow-based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet. If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets.
	if ((mutable->port_config.flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;

	compute_ip_summed(nskb, false);
	vport_receive(vport, nskb);
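/* Computes the MTU available to the inner packet once the tunnel (and any
 * VLAN) header is accounted for and decides whether DF should be set on the
 * outer header.  When the packet will not fit and PMTUD applies,
 * tnl_frag_needed() is used to synthesize the appropriate "too big" reply;
 * the caller drops the packet when this function returns false. */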
static bool check_mtu(struct sk_buff *skb,
		      const struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp)
	frag_off = (mutable->port_config.flags & TNL_F_PMTUD) ? htons(IP_DF) : 0;

	mtu = dst_mtu(&rt_dst(rt))
		- mutable->tunnel_hlen
		- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *old_iph = ip_hdr(skb);

		frag_off |= old_iph->frag_off & htons(IP_DF);
		mtu = max(mtu, IP_MIN_MTU);

		if ((old_iph->frag_off & htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		unsigned int packet_length = skb->len - ETH_HLEN
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

		mtu = max(mtu, IPV6_MIN_MTU);

		/* IPv6 requires PMTUD if the packet is above the minimum MTU. */
		if (packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (mtu < packet_length) {
			if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))

	*frag_offp = frag_off;
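/* Writes the outer IPv4 header for this tunnel into 'header' and then asks
 * the protocol-specific tnl_ops to build its own encapsulation header
 * immediately after it. */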
static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct rtable *rt, void *header)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = htons(IP_DF);
	iph->protocol = tnl_vport->tnl_ops->ipproto;
	iph->tos = mutable->port_config.tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = mutable->port_config.ttl;
	if (!iph->ttl)
		iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);

static inline void *get_cached_header(const struct tnl_cache *cache)
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
static inline bool check_cache_valid(const struct tnl_cache *cache,
				     const struct tnl_mutable_config *mutable)
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
		atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
		rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
		mutable->seq == cache->mutable_seq &&
		(!is_internal_dev(rt_dst(cache->rt).dev) ||
		(cache->flow && !cache->flow->dead));

static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
	struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);

static void cache_cleaner(struct work_struct *work)
	schedule_cache_cleaner();

	tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
static inline void create_eth_hdr(struct tnl_cache *cache,
				  const struct rtable *rt)
	void *cache_data = get_cached_header(cache);
	int hh_len = rt_dst(rt).hh->hh_len;
	int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

	do {
		hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
		memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	} while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;

	read_lock_bh(&rt_dst(rt).hh->hh_lock);
	memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	read_unlock_bh(&rt_dst(rt).hh->hh_lock);
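/* Tries to build, under cache_lock, a cached copy of the complete
 * encapsulation header: the L2 header taken from the neighbour cache followed
 * by the outer IP/tunnel header.  When the destination is an internal device,
 * the matching flow is also looked up and stashed so that packets delivered
 * through the cache can skip a second flow lookup.  Caching is skipped when
 * TNL_F_HDR_CACHE is not set, there is no usable hard header, or the lock is
 * contended. */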
static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cache;

	if (!(mutable->port_config.flags & TNL_F_HDR_CACHE))

	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching, just fall back to the IP stack.

	 * If the lock is contended, fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
	if (!spin_trylock_bh(&tnl_vport->cache_lock))

	cache = cache_dereference(tnl_vport);
	if (check_cache_valid(cache, mutable))

	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);

	cache->len = cache_len;

	create_eth_hdr(cache, rt);
	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

	create_tunnel_header(vport, mutable, rt, cache_data);

	cache->mutable_seq = mutable->seq;

#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;

	if (is_internal_dev(rt_dst(rt).dev)) {
		struct odp_flow_key flow_key;
		struct tbl_node *flow_node;

		vport = internal_dev_get_vport(rt_dst(rt).dev);

		skb = alloc_skb(cache->len, GFP_ATOMIC);

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = flow_extract(skb, vport->port_no, &flow_key, &is_frag);

		flow_node = tbl_lookup(rcu_dereference(vport->dp->table),
				       &flow_key, flow_hash(&flow_key),
			struct sw_flow *flow = flow_cast(flow_node);

	assign_cache_rcu(vport, cache);

	spin_unlock_bh(&tnl_vport->cache_lock);
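/* Returns the route to the tunnel destination.  A valid header cache already
 * holds a route, so it is reused when the requested TOS matches; otherwise
 * the IP stack is queried and, when possible, a fresh header cache is built
 * for the new route. */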
static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 u8 tos, struct tnl_cache **cache)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	if (likely(tos == mutable->port_config.tos &&
		   check_cache_valid(cur_cache, mutable))) {
		return cur_cache->rt;
	} else {
		struct flowi fl = { .nl_u = { .ip4_u =
				    { .daddr = mutable->port_config.daddr,
				      .saddr = mutable->port_config.saddr,
				    .proto = tnl_vport->tnl_ops->ipproto };

		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))

		if (likely(tos == mutable->port_config.tos))
			*cache = build_cache(vport, mutable, rt);
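/* Ensures 'skb' has at least 'headroom' bytes of headroom and is writable,
 * reallocating it when necessary; returns the (possibly new) skb or an
 * ERR_PTR on allocation failure. */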
static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
	if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
		if (unlikely(!nskb)) {
			return ERR_PTR(-ENOMEM);

		set_skb_csum_bits(skb, nskb);
		if (skb->sk)
			skb_set_owner_w(nskb, skb->sk);
static inline bool need_linearize(const struct sk_buff *skb)
	if (unlikely(skb_shinfo(skb)->frag_list))

	 * Generally speaking, we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1, we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt)
	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ mutable->tunnel_hlen;

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		 * If we are doing GSO on a pskb, it is better to make sure that
		 * the headroom is correct now. We will only have to copy the
		 * portion in the linear data area and GSO will preserve
		 * headroom when it creates the segments. This is particularly
		 * beneficial on Xen where we get a lot of GSO pskbs.
		 * Conversely, we avoid copying if it is just to get our own
		 * writable clone because GSO will do the copy for us.
		if (skb_headroom(skb) < min_headroom) {
			skb = check_headroom(skb, min_headroom);

		nskb = skb_gso_segment(skb, 0);
			err = PTR_ERR(nskb);

		skb = check_headroom(skb, min_headroom);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			 * Pages aren't locked and could change at any time.
			 * If this happens after we compute the checksum, the
			 * checksum will be wrong. We linearize now to avoid
			 * this.
			if (unlikely(need_linearize(skb))) {
				err = __skb_linearize(skb);

			err = skb_checksum_help(skb);
		} else if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;

	return ERR_PTR(err);
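/* Hands a chain of already-encapsulated packets to ip_local_out(),
 * accumulating the number of payload bytes (length minus the tunnel header)
 * that the stack accepted; once one fragment is dropped, the remaining ones
 * are freed. */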
static int send_frags(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable)
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - mutable->tunnel_hlen;

		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);
		if (likely(net_xmit_eval(err) == 0))
			sent_len += frag_len;

	 * There's no point in continuing to send fragments once one has been
	 * dropped, so just free the rest. This may help improve the congestion
	 * that caused the first packet to be dropped.
	tnl_free_linked_skbs(skb);
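/* Encapsulation entry point.  Validates the inner protocol headers, chooses
 * the outer TOS and TTL, finds a route, performs GSO/checksum offload
 * handling and MTU checks, prepends the tunnel header (taken from the header
 * cache when it is valid) and then hands each packet either to the internal
 * device it is destined for or to the IP stack.  A total of zero sent bytes
 * is recorded as VPORT_E_TX_DROPPED. */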
int tnl_send(struct vport *vport, struct sk_buff *skb)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))

	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));

	if (mutable->port_config.flags & TNL_F_TOS_INHERIT)
		tos = inner_tos;
	else
		tos = mutable->port_config.tos;

	tos = INET_ECN_encapsulate(tos, inner_tos);
	rt = find_route(vport, mutable, tos, &cache);

	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	skb = handle_offloads(skb, mutable, rt);

	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
		err = VPORT_E_TX_DROPPED;

	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
	if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);

	ttl = mutable->port_config.ttl;
	if (!ttl)
		ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	if (mutable->port_config.flags & TNL_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ipv6_hdr(skb)->hop_limit;
		struct sk_buff *next_skb = skb->next;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, rt_dst(rt).hh->hh_len);

			skb_push(skb, mutable->tunnel_hlen);
			create_tunnel_header(vport, mutable, rt, skb->data);
			skb_reset_network_header(skb);

			skb_dst_set(skb, dst_clone(unattached_dst));
			skb_dst_set(skb, unattached_dst);
			unattached_dst = NULL;

		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

			skb->protocol = htons(ETH_P_IP);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));

				OVS_CB(skb)->flow = cache->flow;
				compute_ip_summed(skb, true);
				vport_receive(cache_vport, skb);
				sent_len += orig_len;

				skb->dev = rt_dst(rt).dev;
				xmit_err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(xmit_err) == 0))
					sent_len += orig_len;
			sent_len += send_frags(skb, mutable);

	if (unlikely(sent_len == 0))
		vport_record_error(vport, VPORT_E_TX_DROPPED);

	tnl_free_linked_skbs(skb);

	dst_release(unattached_dst);
	vport_record_error(vport, err);
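/* Copies the userspace tnl_port_config into 'mutable' and derives the
 * remaining fields: tunnel header length, tunnel type and key-matching
 * behaviour.  Rejects configurations with no destination address, an invalid
 * TOS, or a (saddr, daddr, key) tuple already claimed by another port. */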
static int set_config(const void *config, const struct tnl_ops *tnl_ops,
		      const struct vport *cur_vport,
		      struct tnl_mutable_config *mutable)
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;

	mutable->port_config = *(struct tnl_port_config *)config;

	if (mutable->port_config.daddr == 0)

	if (mutable->port_config.tos != RT_TOS(mutable->port_config.tos))

	mutable->tunnel_hlen = tnl_ops->hdr_len(&mutable->port_config);
	if (mutable->tunnel_hlen < 0)
		return mutable->tunnel_hlen;

	mutable->tunnel_hlen += sizeof(struct iphdr);

	mutable->tunnel_type = tnl_ops->tunnel_type;
	if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
		mutable->tunnel_type |= TNL_T_KEY_MATCH;
		mutable->port_config.in_key = 0;
	} else
		mutable->tunnel_type |= TNL_T_KEY_EXACT;

	old_vport = tnl_find_port(mutable->port_config.saddr,
				  mutable->port_config.daddr,
				  mutable->port_config.in_key,
				  mutable->tunnel_type,
	if (old_vport && old_vport != cur_vport)

	if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
		mutable->port_config.out_key = 0;
struct vport *tnl_create(const struct vport_parms *parms,
			 const struct vport_ops *vport_ops,
			 const struct tnl_ops *tnl_ops)
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	int initial_frag_id;

	vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!tnl_vport->mutable) {
		goto error_free_vport;

	vport_gen_rand_ether_addr(tnl_vport->mutable->eth_addr);
	tnl_vport->mutable->mtu = ETH_DATA_LEN;

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = set_config(parms->config, tnl_ops, NULL, tnl_vport->mutable);
	if (err)
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
				       (net_random() % (MAX_CACHE_EXP / 2));

	err = add_port(vport);
	if (err)
		goto error_free_mutable;

	kfree(tnl_vport->mutable);

	return ERR_PTR(err);
int tnl_modify(struct vport *vport, struct odp_port *port)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);

	err = set_config(port->config, tnl_vport->tnl_ops, vport, mutable);

	err = move_port(vport, mutable);
static void free_port_rcu(struct rcu_head *rcu)
	struct tnl_vport *tnl_vport = container_of(rcu, struct tnl_vport, rcu);

	spin_lock_bh(&tnl_vport->cache_lock);
	free_cache(tnl_vport->cache);
	spin_unlock_bh(&tnl_vport->cache_lock);

	kfree(tnl_vport->mutable);
	vport_free(tnl_vport_to_vport(tnl_vport));
int tnl_destroy(struct vport *vport)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;

	if (vport == tnl_find_port(tnl_vport->mutable->port_config.saddr,
				   tnl_vport->mutable->port_config.daddr,
				   tnl_vport->mutable->port_config.in_key,
				   tnl_vport->mutable->tunnel_type,

	call_rcu(&tnl_vport->rcu, free_port_rcu);
int tnl_set_mtu(struct vport *vport, int mtu)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);

	mutable->mtu = mtu;
	assign_config_rcu(vport, mutable);

int tnl_set_addr(struct vport *vport, const unsigned char *addr)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

const char *tnl_get_name(const struct vport *vport)
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;

const unsigned char *tnl_get_addr(const struct vport *vport)
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;

int tnl_get_mtu(const struct vport *vport)
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference_rtnl(tnl_vport->mutable)->mtu;

void tnl_free_linked_skbs(struct sk_buff *skb)
	struct sk_buff *next = skb->next;