/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "table.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"
#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets, while
 * a longer time increases performance by reducing the frequency at which the
 * cache needs to be rebuilt.  A variety of factors may cause the cache to be
 * invalidated before the expiration time, but this is the maximum.  The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif
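
/*
 * For example, tnl_create() below jitters each port's actual timeout so that
 * caches built together do not all expire together:
 *
 *	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
 *					(net_random() % (MAX_CACHE_EXP / 2));
 *
 * which yields a per-port expiration uniformly distributed in
 * (MAX_CACHE_EXP / 2, MAX_CACHE_EXP] jiffies.
 */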
/*
 * Interval at which to check for and remove caches that are no longer valid.
 * Caches are checked for validity before they are used for packet
 * encapsulation, and old caches are removed at that time.  However, if no
 * packets are sent through the tunnel then the cache will never be destroyed.
 * Since it holds references to a number of system objects, the cache will
 * continue to consume system resources by preventing those objects from
 * being destroyed.  The cache cleaner is periodically run to free invalid
 * caches.  It does not significantly affect system performance.  A shorter
 * interval releases resources faster but itself consumes resources by
 * requiring more frequent checks.  A longer interval may result in messages
 * being printed to the kernel message buffer about unreleased resources.
 * The interval is expressed in jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)
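
/*
 * The cleaner rearms itself: cache_cleaner() below calls
 * schedule_cache_cleaner() before walking port_table, so the next pass is
 * scheduled CACHE_CLEANER_INTERVAL after the start of the current one, and
 * cancel_delayed_work_sync() in check_table_empty() stops the cycle once the
 * last port is removed.
 */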
#define CACHE_DATA_ALIGN 16

static struct tbl __rcu *port_table __read_mostly;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
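
/*
 * Example of the optimization: tnl_find_port() below only issues a hash
 * table lookup for a given address/key combination when the corresponding
 * counter is nonzero, e.g.:
 *
 *	if (key_local_remote_ports) {
 *		tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
 *				      port_hash(&lookup), port_cmp);
 *		...
 *	}
 *
 * A stale (racy) counter read is harmless: at worst we perform a lookup that
 * finds nothing, or briefly skip a port that was just added, which is
 * indistinguishable from the lookup having happened before the change.
 */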
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif
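
/*
 * In 2.6.36 the kernel moved the dst_entry embedded in struct rtable out of
 * the anonymous union 'u' and made it a direct member, so this wrapper lets
 * the rest of the file say rt_dst(rt) regardless of kernel version, e.g.
 * dst_mtu(&rt_dst(rt)).
 */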
static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}

static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
{
	return container_of(node, struct tnl_vport, tbl_node);
}
/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update side code.
 */
static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
	return rcu_dereference_protected(tnl_vport->cache,
					 lockdep_is_held(&tnl_vport->cache_lock));
}
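
/*
 * Typical update-side usage, as in build_cache() below: take cache_lock,
 * then dereference and, if necessary, replace the cache pointer:
 *
 *	if (!spin_trylock_bh(&tnl_vport->cache_lock))
 *		...fall back to building headers directly...
 *	cache = cache_dereference(tnl_vport);
 *	...
 *	assign_cache_rcu(vport, cache);
 *	spin_unlock_bh(&tnl_vport->cache_lock);
 *
 * Readers instead use rcu_dereference(tnl_vport->cache) under
 * rcu_read_lock.
 */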
static inline void schedule_cache_cleaner(void)
{
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}
static void free_cache(struct tnl_cache *cache)
{
	if (!cache)
		return;

	flow_put(cache->flow);
	ip_rt_put(cache->rt);
	kfree(cache);
}
static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
	kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
	free_cache(c);
}
static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}
static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = cache_dereference(tnl_vport);
	rcu_assign_pointer(tnl_vport->cache, new_cache);

	if (old_cache)
		call_rcu(&old_cache->rcu, free_cache_rcu);
}
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	if (mutable->flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->saddr)
			return &local_remote_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->saddr)
			return &key_local_remote_ports;
		else
			return &key_remote_ports;
	}
}
struct port_lookup_key {
	const struct tnl_mutable_config *mutable;
	__be64 key;
	u32 tunnel_type;
	__be32 saddr;
	__be32 daddr;
};
/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target, int unused)
{
	const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
	struct port_lookup_key *lookup = target;

	lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
		lookup->mutable->daddr == lookup->daddr &&
		lookup->mutable->in_key == lookup->key &&
		lookup->mutable->saddr == lookup->saddr);
}
static u32 port_hash(struct port_lookup_key *k)
{
	u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
			     k->tunnel_type, 0);

	return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);
}
static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
	struct port_lookup_key lookup;

	lookup.saddr = mutable->saddr;
	lookup.daddr = mutable->daddr;
	lookup.key = mutable->in_key;
	lookup.tunnel_type = mutable->tunnel_type;

	return port_hash(&lookup);
}
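
/*
 * Note on the hash construction above: the 64-bit tunnel key does not fit in
 * one jhash word, so port_hash() folds the addresses and tunnel type into an
 * initial value with jhash_3words() and then mixes in the high and low 32
 * bits of the key with jhash_2words().  The same function is used for both
 * insertion (mutable_hash) and lookup (tnl_find_port), so the two always
 * agree on bucket placement.
 */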
static void check_table_empty(void)
{
	struct tbl *old_table = rtnl_dereference(port_table);

	if (tbl_count(old_table) == 0) {
		cancel_delayed_work_sync(&cache_cleaner_wq);
		rcu_assign_pointer(port_table, NULL);
		tbl_deferred_destroy(old_table, NULL);
	}
}
static int add_port(struct vport *vport)
{
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	if (!cur_table) {
		struct tbl *new_table;

		new_table = tbl_create(TBL_MIN_BUCKETS);
		if (!new_table)
			return -ENOMEM;

		rcu_assign_pointer(port_table, new_table);
		schedule_cache_cleaner();

	} else if (tbl_count(cur_table) > tbl_n_buckets(cur_table)) {
		struct tbl *new_table;

		new_table = tbl_expand(cur_table);
		if (IS_ERR(new_table)) {
			if (PTR_ERR(new_table) != -ENOSPC)
				return PTR_ERR(new_table);
		} else {
			rcu_assign_pointer(port_table, new_table);
			tbl_deferred_destroy(cur_table, NULL);
		}
	}

	err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
			 mutable_hash(rtnl_dereference(tnl_vport->mutable)));
	if (err)
		return err;

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

	return 0;
}
static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
{
	int err;
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = mutable_hash(new_mutable);
	if (hash == tnl_vport->tbl_node.hash)
		goto table_updated;

	/*
	 * Ideally we should make this move atomic to avoid having gaps in
	 * finding tunnels or the possibility of failure.  However, if we do
	 * find a tunnel it will always be consistent.
	 */
	err = tbl_remove(cur_table, &tnl_vport->tbl_node);
	if (err)
		return err;

	err = tbl_insert(cur_table, &tnl_vport->tbl_node, hash);
	if (err) {
		(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
		check_table_empty();
		return err;
	}

table_updated:
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

	return 0;
}
static int del_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	err = tbl_remove(rtnl_dereference(port_table), &tnl_vport->tbl_node);
	if (err)
		return err;

	check_table_empty();
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;

	return 0;
}
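
/*
 * Port lookup tries the pools from most to least specific, as the counter
 * checks in tnl_find_port() below show: exact-key ports with a configured
 * local address first, then exact-key ports matched on remote address only
 * (lookup.saddr temporarily zeroed), then the same two steps for flow-based
 * (TNL_T_KEY_MATCH) ports with lookup.key zeroed.  The first hit wins.
 */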
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
			    int tunnel_type,
			    const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct tbl *table = rcu_dereference_rtnl(port_table);
	struct tbl_node *tbl_node;

	if (unlikely(!table))
		return NULL;

	lookup.saddr = saddr;
	lookup.daddr = daddr;

	if (tunnel_type & TNL_T_KEY_EXACT) {
		lookup.key = key;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

		if (key_local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
					      port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (key_remote_ports) {
			lookup.saddr = 0;

			tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
					      port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;

			lookup.saddr = saddr;
		}
	}

	if (tunnel_type & TNL_T_KEY_MATCH) {
		lookup.key = 0;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

		if (local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
					      port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (remote_ports) {
			lookup.saddr = 0;

			tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
					      port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}
	}

	return NULL;

found:
	*mutable = lookup.mutable;
	return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
}
static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
{
	if (unlikely(INET_ECN_is_ce(tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce(ip_hdr(skb));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
#endif
	}
}
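
/*
 * This mirrors the standard ECN decapsulation rule (see RFC 6040): if the
 * outer IP header arrived with Congestion Experienced (CE) set, propagate CE
 * into the inner IPv4/IPv6 header so the marking survives decapsulation.
 * Non-IP inner payloads carry no ECN field and are left untouched.
 */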
/**
 *	tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tos: ToS from encapsulating IP packet, used to copy ECN bits
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

	/* Values below 1536 (0x600) in the type field are 802.3 frame
	 * lengths, not Ethernet II protocol numbers. */
	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);

	ecn_decapsulate(skb, tos);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	vport_receive(vport, skb);
}
static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}
static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);
		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
			     + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off = 0;
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);
	icmph->checksum = 0;

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}
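
/*
 * Checksum ordering note: the ICMP checksum must cover the ICMP header plus
 * the quoted payload, so the code above first sums the header with
 * csum_partial(), then folds the payload in while copying it with
 * skb_copy_and_csum_bits(), and only then writes the folded result into
 * icmph->checksum.  Summing with a nonzero checksum field would corrupt the
 * computation, which is why the field starts out zeroed.
 */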
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							 icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}
static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version = 6;
	ipv6h->priority = 0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
				   + payload_length);
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	/* ICMPv6 */
	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					      sizeof(struct icmp6hdr)
					      + payload_length,
					      ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages but
		 * we don't have an address to send from so just fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, IPV6_MIN_MTU);
	}
#endif

	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	} else
		vlan_set_tci(nskb, vlan_get_tci(skb));
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	/*
	 * Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet.  If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets.
	 */
	if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;

	if (unlikely(compute_ip_summed(nskb, false))) {
		kfree_skb(nskb);
		return false;
	}

	vport_receive(vport, nskb);

	return true;
}
static bool check_mtu(struct sk_buff *skb,
		      struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp)
{
	bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
	bool pmtud = mutable->flags & TNL_F_PMTUD;
	__be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
	int mtu = 0;
	unsigned int packet_length = skb->len - ETH_HLEN;

	/* Allow for one level of tagging in the packet length. */
	if (!vlan_tx_tag_present(skb) &&
	    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		packet_length -= VLAN_HLEN;

	if (pmtud) {
		int vlan_header = 0;

		/* The tag needs to go in packet regardless of where it
		 * currently is, so subtract it from the MTU.
		 */
		if (vlan_tx_tag_present(skb) ||
		    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
			vlan_header = VLAN_HLEN;

		mtu = dst_mtu(&rt_dst(rt))
			- ETH_HLEN
			- mutable->tunnel_hlen
			- vlan_header;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		if (df_inherit)
			frag_off = iph->frag_off & htons(IP_DF);

		if (pmtud && iph->frag_off & htons(IP_DF)) {
			mtu = max(mtu, IP_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		/* IPv6 requires end hosts to do fragmentation
		 * if the packet is above the minimum MTU.
		 */
		if (df_inherit && packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (pmtud) {
			mtu = max(mtu, IPV6_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#endif

	*frag_offp = frag_off;
	return true;
}
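
/*
 * Worked example of the PMTUD arithmetic above (illustrative numbers): with
 * a 1500-byte route MTU, a hypothetical 28-byte tunnel_hlen (20-byte outer
 * IPv4 header plus an 8-byte encapsulation header) and an untagged frame,
 * the usable inner MTU is 1500 - 14 (ETH_HLEN) - 28 = 1458.  A 1480-byte
 * inner IPv4 packet with DF set would therefore trigger tnl_frag_needed()
 * with a "fragmentation needed" ICMP quoting mtu = 1458.
 */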
static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct rtable *rt, void *header)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = htons(IP_DF);
	iph->protocol = tnl_vport->tnl_ops->ipproto;
	iph->tos = mutable->tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = mutable->ttl;
	if (!iph->ttl)
		iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));

	tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}
static inline void *get_cached_header(const struct tnl_cache *cache)
{
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}
static inline bool check_cache_valid(const struct tnl_cache *cache,
				     const struct tnl_mutable_config *mutable)
{
	return cache &&
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
		atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
		rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
		mutable->seq == cache->mutable_seq &&
		(!is_internal_dev(rt_dst(cache->rt).dev) ||
		 (cache->flow && !cache->flow->dead));
}
static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
{
	struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);
	}

	return 0;
}
static void cache_cleaner(struct work_struct *work)
{
	schedule_cache_cleaner();

	rcu_read_lock();
	tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
	rcu_read_unlock();
}
static inline void create_eth_hdr(struct tnl_cache *cache,
				  const struct rtable *rt)
{
	void *cache_data = get_cached_header(cache);
	int hh_len = rt_dst(rt).hh->hh_len;
	int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
	unsigned hh_seq;

	do {
		hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
		memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	} while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;
#else
	read_lock_bh(&rt_dst(rt).hh->hh_lock);
	memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	read_unlock_bh(&rt_dst(rt).hh->hh_lock);
#endif
}
static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
				     struct rtable *rt)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cache;
	void *cache_data;
	int cache_len;

	if (!(mutable->flags & TNL_F_HDR_CACHE))
		return NULL;

	/*
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching just fall back to the IP stack.
	 */
	if (!rt_dst(rt).hh)
		return NULL;

	/*
	 * If lock is contended fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
	 */
	if (!spin_trylock_bh(&tnl_vport->cache_lock))
		return NULL;

	cache = cache_dereference(tnl_vport);
	if (check_cache_valid(cache, mutable))
		goto unlock;
	cache = NULL;

	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);
	if (!cache)
		goto unlock;

	cache->len = cache_len;

	create_eth_hdr(cache, rt);
	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

	create_tunnel_header(vport, mutable, rt, cache_data);

	cache->mutable_seq = mutable->seq;
	cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

	if (is_internal_dev(rt_dst(rt).dev)) {
		struct sw_flow_key flow_key;
		struct tbl_node *flow_node;
		struct vport *dst_vport;
		struct sk_buff *skb;
		bool is_frag;
		int err;
		int flow_key_len;

		dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
		if (!dst_vport)
			goto done;

		skb = alloc_skb(cache->len, GFP_ATOMIC);
		if (!skb)
			goto done;

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = flow_extract(skb, dst_vport->port_no, &flow_key,
				   &flow_key_len, &is_frag);

		kfree_skb(skb);
		if (err || is_frag)
			goto done;

		flow_node = tbl_lookup(rcu_dereference(dst_vport->dp->table),
				       &flow_key, flow_key_len,
				       flow_hash(&flow_key, flow_key_len),
				       flow_cmp);
		if (flow_node) {
			struct sw_flow *flow = flow_cast(flow_node);

			cache->flow = flow;
			flow_hold(flow);
		}
	}

done:
	assign_cache_rcu(vport, cache);

unlock:
	spin_unlock_bh(&tnl_vport->cache_lock);

	return cache;
}
static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 u8 tos, struct tnl_cache **cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	*cache = NULL;
	tos = RT_TOS(tos);

	if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
		*cache = cur_cache;
		return cur_cache->rt;
	} else {
		struct rtable *rt;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = mutable->daddr,
						.saddr = mutable->saddr,
						.tos = tos } },
				    .proto = tnl_vport->tnl_ops->ipproto };

		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
			return NULL;
#else
		struct flowi4 fl = { .daddr = mutable->daddr,
				     .saddr = mutable->saddr,
				     .flowi4_tos = tos,
				     .flowi4_proto = tnl_vport->tnl_ops->ipproto };

		rt = ip_route_output_key(&init_net, &fl);
		if (IS_ERR(rt))
			return NULL;
#endif

		if (likely(tos == mutable->tos))
			*cache = build_cache(vport, mutable, rt);

		return rt;
	}
}
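
/*
 * The version split above reflects the 2.6.39 routing-API change: older
 * kernels fill a generic struct flowi (with the IPv4 members nested under
 * .nl_u.ip4_u) and ip_route_output_key() returns the route through an output
 * parameter, while 2.6.39+ uses the IPv4-specific struct flowi4 and
 * ip_route_output_key() returns either a struct rtable or an ERR_PTR value.
 */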
static inline bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
			return true;

	return false;
}
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt)
{
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ mutable->tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb));

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto error_free;
	}

	forward_ip_summed(skb, true);

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		nskb = skb_gso_segment(skb, 0);
		if (IS_ERR(nskb)) {
			kfree_skb(skb);
			err = PTR_ERR(nskb);
			goto error;
		}

		kfree_skb(skb);
		skb = nskb;
	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error_free;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error_free;
	}

	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}
static int send_frags(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable)
{
	int sent_len;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - mutable->tunnel_hlen;
		int err;

		skb->next = NULL;
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);
		skb = next;
		if (unlikely(net_xmit_eval(err)))
			goto free_frags;
		sent_len += frag_len;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped so just free the rest.  This may help improve the congestion
	 * that caused the first packet to be dropped.
	 */
	tnl_free_linked_skbs(skb);
	return sent_len;
}
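
/*
 * net_xmit_eval() above treats NET_XMIT_CN (congestion notification) as
 * success, so a fragment that was queued but triggered congestion feedback
 * still counts toward sent_len; only a real drop aborts the loop.
 */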
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;
	int sent_len = 0;
	__be16 frag_off = 0;
	u8 ttl;
	u8 inner_tos;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !vlan_tx_tag_present(skb)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* ToS */
	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	else
		inner_tos = 0;

	if (mutable->flags & TNL_F_TOS_INHERIT)
		tos = inner_tos;
	else
		tos = mutable->tos;

	tos = INET_ECN_encapsulate(tos, inner_tos);

	/* Route lookup */
	rt = find_route(vport, mutable, tos, &cache);
	if (unlikely(!rt))
		goto error_free;
	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	skb_clear_rxhash(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt);
	if (IS_ERR(skb))
		goto error;

	/* MTU */
	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}

	/*
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
	 */
	if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
		     cache)) {
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);
		cache = NULL;
	}

	/* TTL */
	ttl = mutable->ttl;
	if (!ttl)
		ttl = ip4_dst_hoplimit(&rt_dst(rt));

	if (mutable->flags & TNL_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ipv6_hdr(skb)->hop_limit;
#endif
	}

	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		if (unlikely(vlan_deaccel_tag(skb)))
			goto next;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, rt_dst(rt).hh->hh_len);
		} else {
			skb_push(skb, mutable->tunnel_hlen);
			create_tunnel_header(vport, mutable, rt, skb->data);
			skb_reset_network_header(skb);

			if (next_skb)
				skb_dst_set(skb, dst_clone(unattached_dst));
			else {
				skb_dst_set(skb, unattached_dst);
				unattached_dst = NULL;
			}
		}
		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph = ip_hdr(skb);
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
		if (unlikely(!skb))
			goto next;

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

			skb->protocol = htons(ETH_P_IP);
			iph = ip_hdr(skb);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));
			ip_send_check(iph);

			if (cache_vport) {
				if (unlikely(compute_ip_summed(skb, true))) {
					kfree_skb(skb);
					goto next;
				}

				OVS_CB(skb)->flow = cache->flow;
				vport_receive(cache_vport, skb);
				sent_len += orig_len;
			} else {
				int xmit_err;

				skb->dev = rt_dst(rt).dev;
				xmit_err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(xmit_err) == 0))
					sent_len += orig_len;
			}
		} else
			sent_len += send_frags(skb, mutable);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		vport_record_error(vport, VPORT_E_TX_DROPPED);

	goto out;

error_free:
	tnl_free_linked_skbs(skb);
error:
	vport_record_error(vport, err);
out:
	dst_release(unattached_dst);
	return sent_len;
}
static const struct nla_policy tnl_policy[ODP_TUNNEL_ATTR_MAX + 1] = {
	[ODP_TUNNEL_ATTR_FLAGS] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_OUT_KEY] = { .type = NLA_U64 },
	[ODP_TUNNEL_ATTR_IN_KEY] = { .type = NLA_U64 },
	[ODP_TUNNEL_ATTR_TOS] = { .type = NLA_U8 },
	[ODP_TUNNEL_ATTR_TTL] = { .type = NLA_U8 },
};
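
/*
 * Sketch of the expected attribute usage (hypothetical values): userspace
 * nests these attributes inside the vport options, so a tunnel to 192.0.2.1
 * with an exact in_key of 7 would carry ODP_TUNNEL_ATTR_DST_IPV4 and
 * ODP_TUNNEL_ATTR_IN_KEY, while omitting ODP_TUNNEL_ATTR_IN_KEY instead
 * selects flow-based key matching (TNL_F_IN_KEY_MATCH) in tnl_set_config()
 * below.
 */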
/* Sets ODP_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
			  const struct vport *cur_vport,
			  struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;
	struct nlattr *a[ODP_TUNNEL_ATTR_MAX + 1];
	int err;

	if (!options)
		return -EINVAL;

	err = nla_parse_nested(a, ODP_TUNNEL_ATTR_MAX, options, tnl_policy);
	if (err)
		return err;

	if (!a[ODP_TUNNEL_ATTR_FLAGS] || !a[ODP_TUNNEL_ATTR_DST_IPV4])
		return -EINVAL;

	mutable->flags = nla_get_u32(a[ODP_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;

	if (a[ODP_TUNNEL_ATTR_SRC_IPV4])
		mutable->saddr = nla_get_be32(a[ODP_TUNNEL_ATTR_SRC_IPV4]);
	mutable->daddr = nla_get_be32(a[ODP_TUNNEL_ATTR_DST_IPV4]);

	if (a[ODP_TUNNEL_ATTR_TOS]) {
		mutable->tos = nla_get_u8(a[ODP_TUNNEL_ATTR_TOS]);
		if (mutable->tos != RT_TOS(mutable->tos))
			return -EINVAL;
	}

	if (a[ODP_TUNNEL_ATTR_TTL])
		mutable->ttl = nla_get_u8(a[ODP_TUNNEL_ATTR_TTL]);

	mutable->tunnel_type = tnl_ops->tunnel_type;
	if (!a[ODP_TUNNEL_ATTR_IN_KEY]) {
		mutable->tunnel_type |= TNL_T_KEY_MATCH;
		mutable->flags |= TNL_F_IN_KEY_MATCH;
	} else {
		mutable->tunnel_type |= TNL_T_KEY_EXACT;
		mutable->in_key = nla_get_be64(a[ODP_TUNNEL_ATTR_IN_KEY]);
	}

	if (!a[ODP_TUNNEL_ATTR_OUT_KEY])
		mutable->flags |= TNL_F_OUT_KEY_ACTION;
	else
		mutable->out_key = nla_get_be64(a[ODP_TUNNEL_ATTR_OUT_KEY]);

	mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
	if (mutable->tunnel_hlen < 0)
		return mutable->tunnel_hlen;

	mutable->tunnel_hlen += sizeof(struct iphdr);

	old_vport = tnl_find_port(mutable->saddr, mutable->daddr,
				  mutable->in_key, mutable->tunnel_type,
				  &old_mutable);

	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	return 0;
}
struct vport *tnl_create(const struct vport_parms *parms,
			 const struct vport_ops *vport_ops,
			 const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	struct tnl_mutable_config *mutable;
	int initial_frag_id;
	int err;

	vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	vport_gen_rand_ether_addr(mutable->eth_addr);

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
	if (err)
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
					(net_random() % (MAX_CACHE_EXP / 2));
#endif

	rcu_assign_pointer(tnl_vport->mutable, mutable);

	err = add_port(vport);
	if (err)
		goto error_free_mutable;

	return vport;

error_free_mutable:
	kfree(mutable);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}
int tnl_set_options(struct vport *vport, struct nlattr *options)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;
	struct tnl_mutable_config *mutable;
	int err;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	/* Copy fields whose values should be retained. */
	old_mutable = rtnl_dereference(tnl_vport->mutable);
	mutable->seq = old_mutable->seq + 1;
	memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

	/* Parse the others configured by userspace. */
	err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
	if (err)
		goto error_free;

	err = move_port(vport, mutable);
	if (err)
		goto error_free;

	return 0;

error_free:
	kfree(mutable);
error:
	return err;
}
int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	NLA_PUT_U32(skb, ODP_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
	NLA_PUT_BE32(skb, ODP_TUNNEL_ATTR_DST_IPV4, mutable->daddr);

	if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
		NLA_PUT_BE64(skb, ODP_TUNNEL_ATTR_IN_KEY, mutable->in_key);
	if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
		NLA_PUT_BE64(skb, ODP_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
	if (mutable->saddr)
		NLA_PUT_BE32(skb, ODP_TUNNEL_ATTR_SRC_IPV4, mutable->saddr);
	if (mutable->tos)
		NLA_PUT_U8(skb, ODP_TUNNEL_ATTR_TOS, mutable->tos);
	if (mutable->ttl)
		NLA_PUT_U8(skb, ODP_TUNNEL_ATTR_TTL, mutable->ttl);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu,
						   struct tnl_vport, rcu);

	free_cache((struct tnl_cache __force *)tnl_vport->cache);
	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
	vport_free(tnl_vport_to_vport(tnl_vport));
}
int tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable, *old_mutable;

	mutable = rtnl_dereference(tnl_vport->mutable);

	if (vport == tnl_find_port(mutable->saddr, mutable->daddr,
				   mutable->in_key, mutable->tunnel_type,
				   &old_mutable))
		del_port(vport);

	call_rcu(&tnl_vport->rcu, free_port_rcu);

	return 0;
}
int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
			  sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}
const char *tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}
void tnl_free_linked_skbs(struct sk_buff *skb)
{
	while (skb) {
		struct sk_buff *next = skb->next;
		kfree_skb(skb);
		skb = next;
	}
}