/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "checksum.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"
#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets, while
 * a longer time increases performance by reducing the frequency with which
 * the cache needs to be rebuilt.  A variety of factors may cause the cache to
 * be invalidated before the expiration time, but this is the maximum.  The
 * time is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif
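
/* For example, with CONFIG_HZ=1000 this is 1000 jiffies, i.e. a cache entry
 * is considered stale after at most one second. */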
/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent
 * through the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A shorter interval releases
 * resources faster but consumes CPU time in more frequent checks.  A longer
 * interval may result in messages being printed to the kernel message buffer
 * about unreleased resources.  The interval is expressed in jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)
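
/* 5 * HZ jiffies == five seconds between cleaner runs, independent of the
 * configured tick rate. */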
#define CACHE_DATA_ALIGN 16
#define PORT_TABLE_SIZE 1024
static struct hlist_head *port_table __read_mostly;
static int port_table_count;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int key_multicast_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
static unsigned int null_ports __read_mostly;
static unsigned int multicast_ports __read_mostly;
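
/* Each counter above tracks how many ports fall into the corresponding
 * lookup class (e.g. key_local_remote_ports counts ports configured with an
 * exact in_key and both a local and remote address); ovs_tnl_find_port()
 * consults them so it can skip hash-table probes for empty classes. */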
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
static struct hh_cache *rt_hh(struct rtable *rt)
{
	struct neighbour *neigh = dst_get_neighbour_noref(&rt->dst);
	if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
	    !neigh->hh.hh_len)
		return NULL;
	return &neigh->hh;
}
#else
#define rt_hh(rt) (rt_dst(rt).hh)
#endif
static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}
/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update-side code.
 */
static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
	return rcu_dereference_protected(tnl_vport->cache,
					 lockdep_is_held(&tnl_vport->cache_lock));
}
static void schedule_cache_cleaner(void)
{
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}
static void free_cache(struct tnl_cache *cache)
{
	if (!cache)
		return;

	ovs_flow_put(cache->flow);
	ip_rt_put(cache->rt);
	kfree(cache);
}
static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
	kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
	free_cache(c);
}
/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
 * within an RCU callback.  Fortunately this part doesn't require waiting for
 * an RCU grace period.
 */
static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
{
	ASSERT_RTNL();
	if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(port_key_get_net(&mutable->key), mutable->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, mutable->key.daddr);
	}
}
static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);

	free_mutable_rtnl(old_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}
static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = cache_dereference(tnl_vport);
	rcu_assign_pointer(tnl_vport->cache, new_cache);

	if (old_cache)
		call_rcu(&old_cache->rcu, free_cache_rcu);
}
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	bool is_multicast = ipv4_is_multicast(mutable->key.daddr);

	if (mutable->flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->key.saddr)
			return &local_remote_ports;
		else if (is_multicast)
			return &multicast_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->key.saddr)
			return &key_local_remote_ports;
		else if (is_multicast)
			return &key_multicast_ports;
		else if (mutable->key.daddr)
			return &key_remote_ports;
		else
			return &null_ports;
	}
}
static u32 port_hash(const struct port_lookup_key *key)
{
	return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}
static struct hlist_head *find_bucket(u32 hash)
{
	return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}
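
/* port_hash() runs jhash2() over the raw bytes of the lookup key and
 * port_table_lookup() compares keys with memcmp(), so PORT_KEY_LEN must be a
 * multiple of sizeof(u32) and any padding inside struct port_lookup_key must
 * be zeroed by everyone who builds a key. */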
static void port_table_add_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable;
	u32 hash;

	if (port_table_count == 0)
		schedule_cache_cleaner();

	mutable = rtnl_dereference(tnl_vport->mutable);
	hash = port_hash(&mutable->key);
	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
	port_table_count++;

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}
static void port_table_move_port(struct vport *vport,
				 struct tnl_mutable_config *new_mutable)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = port_hash(&new_mutable->key);
	hlist_del_init_rcu(&tnl_vport->hash_node);
	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}
static void port_table_remove_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	hlist_del_init_rcu(&tnl_vport->hash_node);

	port_table_count--;
	if (port_table_count == 0)
		cancel_delayed_work_sync(&cache_cleaner_wq);

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}
static struct vport *port_table_lookup(struct port_lookup_key *key,
				       const struct tnl_mutable_config **pmutable)
{
	struct hlist_node *n;
	struct hlist_head *bucket;
	u32 hash = port_hash(key);
	struct tnl_vport *tnl_vport;

	bucket = find_bucket(hash);

	hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
		struct tnl_mutable_config *mutable;

		mutable = rcu_dereference_rtnl(tnl_vport->mutable);
		if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
			*pmutable = mutable;
			return tnl_vport_to_vport(tnl_vport);
		}
	}

	return NULL;
}
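
/* Lookup precedence for received packets: an exact in_key match on a fully
 * specified (saddr, daddr) pair wins over an exact key match on daddr alone,
 * which in turn wins over the corresponding TNL_T_KEY_MATCH (wildcard key)
 * entries.  Multicast tunnels are probed with the packet's source address as
 * the lookup daddr, and "null" ports (no addresses) act as a final
 * catch-all. */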
struct vport *ovs_tnl_find_port(struct net *net, __be32 saddr, __be32 daddr,
				__be64 key, int tunnel_type,
				const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct vport *vport;
	bool is_multicast = ipv4_is_multicast(saddr);

	port_key_set_net(&lookup, net);
	lookup.saddr = saddr;
	lookup.daddr = daddr;

	/* First try for exact match on in_key. */
	lookup.in_key = key;
	lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
	if (!is_multicast && key_local_remote_ports) {
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}
	if (key_remote_ports) {
		lookup.saddr = 0;
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
		lookup.saddr = saddr;
	}

	/* Then try matches that wildcard in_key. */
	lookup.in_key = 0;
	lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
	if (!is_multicast && local_remote_ports) {
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}
	if (remote_ports) {
		lookup.saddr = 0;
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}

	if (is_multicast) {
		lookup.saddr = 0;
		lookup.daddr = saddr;
		if (key_multicast_ports) {
			lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
			lookup.in_key = key;
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;
		}
		if (multicast_ports) {
			lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
			lookup.in_key = 0;
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;
		}
	}

	if (null_ports) {
		lookup.daddr = 0;
		lookup.saddr = 0;
		lookup.tunnel_type = tunnel_type;
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}

	return NULL;
}
static void ecn_decapsulate(struct sk_buff *skb)
{
	if (unlikely(INET_ECN_is_ce(OVS_CB(skb)->tun_key->ipv4_tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce(ip_hdr(skb));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
#endif
	}
}
/**
 *	ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet; the ECN bits of the encapsulating IP header are
 *	 taken from OVS_CB(skb)->tun_key
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);

	ecn_decapsulate(skb);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	ovs_vport_receive(vport, skb);
}
static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}
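
/* check_ipv4_address() and the ipv4/ipv6_should_icmp() helpers below decide
 * whether ovs_tnl_frag_needed() may synthesize an ICMP "fragmentation
 * needed" / "packet too big" reply: only for first-fragment packets between
 * plausible unicast endpoints that are not themselves ICMP errors. */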
static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);
		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
			     + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off = 0;
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);
	icmph->checksum = 0;

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr, &frag_off);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}
static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version = 6;
	ipv6h->priority = 0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
				   + payload_length);
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6h->daddr = old_ipv6h->saddr;
	ipv6h->saddr = old_ipv6h->daddr;

	/* ICMPv6 */
	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					      sizeof(struct icmp6hdr)
					      + payload_length,
					      ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */
bool ovs_tnl_frag_needed(struct vport *vport,
			 const struct tnl_mutable_config *mutable,
			 struct sk_buff *skb, unsigned int mtu)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages, but
		 * we don't have an address to send from, so just fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, IPV6_MIN_MTU);
	}
#endif

	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	} else
		vlan_set_tci(nskb, vlan_get_tci(skb));
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	if (unlikely(compute_ip_summed(nskb, false))) {
		kfree_skb(nskb);
		return false;
	}

	ovs_vport_receive(vport, nskb);

	return true;
}
static bool check_mtu(struct sk_buff *skb,
		      struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp,
		      int tunnel_hlen)
{
	bool df_inherit;
	bool pmtud;
	__be16 frag_off;
	int mtu = 0;
	unsigned int packet_length = skb->len - ETH_HLEN;

	if (OVS_CB(skb)->tun_key->ipv4_dst) {
		df_inherit = false;
		pmtud = false;
		frag_off = OVS_CB(skb)->tun_key->tun_flags & OVS_FLOW_TNL_F_DONT_FRAGMENT ?
			   htons(IP_DF) : 0;
	} else {
		df_inherit = mutable->flags & TNL_F_DF_INHERIT;
		pmtud = mutable->flags & TNL_F_PMTUD;
		frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
	}

	/* Allow for one level of tagging in the packet length. */
	if (!vlan_tx_tag_present(skb) &&
	    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		packet_length -= VLAN_HLEN;

	if (pmtud) {
		int vlan_header = 0;

		/* The tag needs to go in the packet regardless of where it
		 * currently is, so subtract it from the MTU.
		 */
		if (vlan_tx_tag_present(skb) ||
		    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
			vlan_header = VLAN_HLEN;

		mtu = dst_mtu(&rt_dst(rt))
			- ETH_HLEN
			- tunnel_hlen
			- vlan_header;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		if (df_inherit)
			frag_off = iph->frag_off & htons(IP_DF);

		if (pmtud && iph->frag_off & htons(IP_DF)) {
			mtu = max(mtu, IP_MIN_MTU);

			if (packet_length > mtu &&
			    ovs_tnl_frag_needed(vport, mutable, skb, mtu))
				return false;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		/* IPv6 requires end hosts to do fragmentation
		 * if the packet is above the minimum MTU.
		 */
		if (df_inherit && packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (pmtud) {
			mtu = max(mtu, IPV6_MIN_MTU);

			if (packet_length > mtu &&
			    ovs_tnl_frag_needed(vport, mutable, skb, mtu))
				return false;
		}
	}
#endif

	*frag_offp = frag_off;
	return true;
}
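
/* Worked example (illustrative numbers only): with dst_mtu() of 1500, a
 * 28-byte encapsulation (20-byte outer IPv4 header plus a hypothetical
 * 8-byte tunnel header) and one VLAN tag, the inner MTU advertised by PMTUD
 * is 1500 - ETH_HLEN(14) - 28 - VLAN_HLEN(4) = 1454 bytes. */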
static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct ovs_key_ipv4_tunnel *tun_key,
				 const struct rtable *rt, void *header)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = htons(IP_DF);
	iph->protocol = tnl_vport->tnl_ops->ipproto;
	iph->tos = mutable->tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = mutable->ttl;
	if (!iph->ttl)
		iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));

	tnl_vport->tnl_ops->build_header(vport, mutable, tun_key, iph + 1);
}
static void *get_cached_header(const struct tnl_cache *cache)
{
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}
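
/* The cached header bytes (link-layer header followed by the prebuilt outer
 * IP/tunnel header) live in the same allocation as struct tnl_cache,
 * immediately after it and aligned to CACHE_DATA_ALIGN; see build_cache(). */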
#ifdef HAVE_RT_GENID
static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}
#endif
static bool check_cache_valid(const struct tnl_cache *cache,
			      const struct tnl_mutable_config *mutable)
{
	struct hh_cache *hh;

	if (!cache)
		return false;

	hh = rt_hh(cache->rt);
	return hh &&
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
		rt_genid(dev_net(rt_dst(cache->rt).dev)) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
		hh->hh_lock.sequence == cache->hh_seq &&
#endif
		mutable->seq == cache->mutable_seq &&
		(!ovs_is_internal_dev(rt_dst(cache->rt).dev) ||
		(cache->flow && !cache->flow->dead));
}
static void __cache_cleaner(struct tnl_vport *tnl_vport)
{
	const struct tnl_mutable_config *mutable =
			rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);
	}
}
static void cache_cleaner(struct work_struct *work)
{
	int i;

	schedule_cache_cleaner();

	rcu_read_lock();
	for (i = 0; i < PORT_TABLE_SIZE; i++) {
		struct hlist_node *n;
		struct hlist_head *bucket;
		struct tnl_vport *tnl_vport;

		bucket = &port_table[i];
		hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
			__cache_cleaner(tnl_vport);
	}
	rcu_read_unlock();
}
static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
{
	void *cache_data = get_cached_header(cache);
	int hh_off;

#ifdef HAVE_HH_SEQ
	unsigned hh_seq;

	do {
		hh_seq = read_seqbegin(&hh->hh_lock);
		hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
		memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
		cache->hh_len = hh->hh_len;
	} while (read_seqretry(&hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;
#else
	read_lock(&hh->hh_lock);
	hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
	memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
	cache->hh_len = hh->hh_len;
	read_unlock(&hh->hh_lock);
#endif
}
static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
				     struct rtable *rt)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	static const struct ovs_key_ipv4_tunnel tun_key;
	struct tnl_cache *cache;
	void *cache_data;
	int cache_len;
	struct hh_cache *hh;
	int tunnel_hlen;

	if (!(mutable->flags & TNL_F_HDR_CACHE))
		return NULL;

	tunnel_hlen = tnl_vport->tnl_ops->hdr_len(mutable, &tun_key);
	if (tunnel_hlen < 0)
		return NULL;

	tunnel_hlen += sizeof(struct iphdr);

	/*
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching, just fall back to the IP stack.
	 */
	hh = rt_hh(rt);
	if (!hh)
		return NULL;

	/*
	 * If lock is contended fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
	 */
	if (!spin_trylock(&tnl_vport->cache_lock))
		return NULL;

	cache = cache_dereference(tnl_vport);
	if (check_cache_valid(cache, mutable))
		goto unlock;
	cache = NULL;

	cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + tunnel_hlen;
	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);
	if (!cache)
		goto unlock;

	create_eth_hdr(cache, hh);
	cache_data = get_cached_header(cache) + cache->hh_len;
	cache->len = cache->hh_len + tunnel_hlen;

	create_tunnel_header(vport, mutable, &tun_key, rt, cache_data);

	cache->mutable_seq = mutable->seq;
	cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

	if (ovs_is_internal_dev(rt_dst(rt).dev)) {
		struct sw_flow_key flow_key;
		struct vport *dst_vport;
		struct sk_buff *skb;
		int err;
		int flow_key_len;
		struct sw_flow *flow;

		dst_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
		if (!dst_vport)
			goto done;

		skb = alloc_skb(cache->len, GFP_ATOMIC);
		if (!skb)
			goto done;

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = ovs_flow_extract(skb, dst_vport->port_no, &flow_key,
				       &flow_key_len);

		consume_skb(skb);
		if (err)
			goto done;

		flow = ovs_flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
					   &flow_key, flow_key_len);
		if (flow) {
			cache->flow = flow;
			ovs_flow_hold(flow);
		}
	}

done:
	assign_cache_rcu(vport, cache);

unlock:
	spin_unlock(&tnl_vport->cache_lock);

	return cache;
}
static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
				   __be32 saddr, __be32 daddr, u8 ipproto,
				   u8 tos)
{
	/* The tunnel configuration keeps the DSCP part of the ToS bits, but
	 * the Linux routing code expects only the RT_TOS bits. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
	struct flowi fl = { .nl_u = { .ip4_u = {
					.daddr = daddr,
					.saddr = saddr,
					.tos = RT_TOS(tos) } },
			    .proto = ipproto };
	struct rtable *rt;

	if (unlikely(ip_route_output_key(port_key_get_net(&mutable->key), &rt, &fl)))
		return ERR_PTR(-EADDRNOTAVAIL);

	return rt;
#else
	struct flowi4 fl = { .daddr = daddr,
			     .saddr = saddr,
			     .flowi4_tos = RT_TOS(tos),
			     .flowi4_proto = ipproto };

	return ip_route_output_key(port_key_get_net(&mutable->key), &fl);
#endif
}
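
/* RT_TOS() masks the ToS byte down to the four legacy IPTOS_TOS bits.  For
 * instance, a configured ToS of 0xb8 (DSCP EF) reaches the route lookup as
 * 0xb8 & IPTOS_TOS_MASK == 0x18. */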
static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 __be32 saddr, __be32 daddr, u8 tos,
				 struct tnl_cache **cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	*cache = NULL;
	tos = RT_TOS(tos);

	if (tos == RT_TOS(mutable->tos) &&
	    check_cache_valid(cur_cache, mutable)) {
		*cache = cur_cache;
		return cur_cache->rt;
	} else {
		struct rtable *rt;

		rt = __find_route(mutable, saddr, daddr,
				  tnl_vport->tnl_ops->ipproto, tos);
		if (IS_ERR(rt))
			return NULL;

		if (likely(tos == RT_TOS(mutable->tos)))
			*cache = build_cache(vport, mutable, rt);

		return rt;
	}
}
static bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
			return true;

	return false;
}
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt,
				       int tunnel_hlen)
{
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto error_free;
	}

	forward_ip_summed(skb, true);

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		nskb = skb_gso_segment(skb, 0);
		if (IS_ERR(nskb)) {
			kfree_skb(skb);
			err = PTR_ERR(nskb);
			goto error;
		}

		consume_skb(skb);
		skb = nskb;
	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error_free;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error_free;
	}

	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}
static int send_frags(struct sk_buff *skb,
		      int tunnel_hlen)
{
	int sent_len;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - tunnel_hlen;
		int err;

		skb->next = NULL;
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);

		skb = next;
		if (unlikely(net_xmit_eval(err)))
			goto free_frags;
		sent_len += frag_len;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped, so just free the rest.  This may also help relieve the
	 * congestion that caused the first packet to be dropped.
	 */
	ovs_tnl_free_linked_skbs(skb);
	return sent_len;
}
int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;
	struct ovs_key_ipv4_tunnel tun_key;
	int sent_len = 0;
	int tunnel_hlen;
	__be16 frag_off = 0;
	__be32 daddr;
	__be32 saddr;
	u8 ttl;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !vlan_tx_tag_present(skb)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* If OVS_CB(skb)->tun_key is NULL, point it at the local tun_key here,
	 * zeroed out, so the code below always sees consistent state. */
	if (!OVS_CB(skb)->tun_key) {
		memset(&tun_key, 0, sizeof(tun_key));
		OVS_CB(skb)->tun_key = &tun_key;
	}

	tunnel_hlen = tnl_vport->tnl_ops->hdr_len(mutable, OVS_CB(skb)->tun_key);
	if (unlikely(tunnel_hlen < 0)) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}
	tunnel_hlen += sizeof(struct iphdr);

	if (OVS_CB(skb)->tun_key->ipv4_dst) {
		daddr = OVS_CB(skb)->tun_key->ipv4_dst;
		saddr = OVS_CB(skb)->tun_key->ipv4_src;
		tos = OVS_CB(skb)->tun_key->ipv4_tos;
		ttl = OVS_CB(skb)->tun_key->ipv4_ttl;
	} else {
		u8 inner_tos;

		daddr = mutable->key.daddr;
		saddr = mutable->key.saddr;

		if (unlikely(!daddr)) {
			/* Trying to send a packet from a null port without
			 * tunnel info?  Drop this packet. */
			err = VPORT_E_TX_DROPPED;
			goto error_free;
		}

		/* ToS */
		if (skb->protocol == htons(ETH_P_IP))
			inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
		else
			inner_tos = 0;

		if (mutable->flags & TNL_F_TOS_INHERIT)
			tos = inner_tos;
		else
			tos = mutable->tos;

		tos = INET_ECN_encapsulate(tos, inner_tos);

		/* TTL */
		ttl = mutable->ttl;
		if (mutable->flags & TNL_F_TTL_INHERIT) {
			if (skb->protocol == htons(ETH_P_IP))
				ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
			else if (skb->protocol == htons(ETH_P_IPV6))
				ttl = ipv6_hdr(skb)->hop_limit;
#endif
		}
	}

	/* Route lookup */
	rt = find_route(vport, mutable, saddr, daddr, tos, &cache);
	if (unlikely(!rt))
		goto error_free;
	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	skb_clear_rxhash(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt, tunnel_hlen);
	if (IS_ERR(skb)) {
		skb = NULL;
		goto error;
	}

	/* MTU */
	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off, tunnel_hlen))) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}

	/*
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
	 */
	if (unlikely(skb->len + tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
		     cache)) {
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);
		cache = NULL;
	}

	/* TTL fixup. */
	if (!OVS_CB(skb)->tun_key->ipv4_dst) {
		if (!(mutable->flags & TNL_F_TTL_INHERIT)) {
			if (!ttl)
				ttl = ip4_dst_hoplimit(&rt_dst(rt));
		}
	}
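
	/* handle_offloads() may have replaced 'skb' with a GSO segment list
	 * chained through skb->next; each segment gets its own tunnel header
	 * and is transmitted (or handed to an internal device) separately in
	 * the loop below. */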
	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		if (unlikely(vlan_deaccel_tag(skb)))
			goto next;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, cache->hh_len);
		} else {
			skb_push(skb, tunnel_hlen);
			create_tunnel_header(vport, mutable, OVS_CB(skb)->tun_key, rt, skb->data);
			skb_reset_network_header(skb);

			if (next_skb)
				skb_dst_set(skb, dst_clone(unattached_dst));
			else {
				skb_dst_set(skb, unattached_dst);
				unattached_dst = NULL;
			}
		}
		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph = ip_hdr(skb);
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		skb = tnl_vport->tnl_ops->update_header(vport, mutable,
							&rt_dst(rt), skb, tunnel_hlen);
		if (unlikely(!skb))
			goto next;

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport;

			cache_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
			skb->protocol = htons(ETH_P_IP);
			iph = ip_hdr(skb);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));
			ip_send_check(iph);

			if (cache_vport) {
				if (unlikely(compute_ip_summed(skb, true))) {
					kfree_skb(skb);
					goto next;
				}

				OVS_CB(skb)->flow = cache->flow;
				ovs_vport_receive(cache_vport, skb);
				sent_len += orig_len;
			} else {
				int xmit_err;

				skb->dev = rt_dst(rt).dev;
				xmit_err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(xmit_err) == 0))
					sent_len += orig_len;
			}
		} else
			sent_len += send_frags(skb, tunnel_hlen);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);

	return sent_len;

error_free:
	ovs_tnl_free_linked_skbs(skb);
error:
	ovs_vport_record_error(vport, err);
	dst_release(unattached_dst);
	return sent_len;
}
static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
	[OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
	[OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
};
/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
 * zeroed. */
static int tnl_set_config(struct net *net, struct nlattr *options,
			  const struct tnl_ops *tnl_ops,
			  const struct vport *cur_vport,
			  struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;
	struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
	int err;

	port_key_set_net(&mutable->key, net);
	mutable->key.tunnel_type = tnl_ops->tunnel_type;

	if (!options)
		return -EINVAL;

	err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
	if (err)
		return err;

	if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
		return -EINVAL;

	mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
	mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);

	if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
		if (ipv4_is_multicast(mutable->key.daddr))
			return -EINVAL;
		mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
	}

	if (a[OVS_TUNNEL_ATTR_TOS]) {
		mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
		/* Reject ToS config with ECN bits set. */
		if (mutable->tos & INET_ECN_MASK)
			return -EINVAL;
	}

	if (a[OVS_TUNNEL_ATTR_TTL])
		mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);

	if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
		mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
		mutable->flags |= TNL_F_IN_KEY_MATCH;
	} else {
		mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
		mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
	}

	if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
		mutable->flags |= TNL_F_OUT_KEY_ACTION;
	else
		mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);

	mutable->mlink = 0;
	if (ipv4_is_multicast(mutable->key.daddr)) {
		struct net_device *dev;
		struct rtable *rt;

		rt = __find_route(mutable, mutable->key.saddr, mutable->key.daddr,
				  tnl_ops->ipproto, mutable->tos);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt_dst(rt).dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		mutable->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
	}

	old_vport = port_table_lookup(&mutable->key, &old_mutable);
	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	return 0;
}
struct vport *ovs_tnl_create(const struct vport_parms *parms,
			     const struct vport_ops *vport_ops,
			     const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	struct tnl_mutable_config *mutable;
	int initial_frag_id;
	int err;

	vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	random_ether_addr(mutable->eth_addr);

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = tnl_set_config(ovs_dp_get_net(parms->dp), parms->options, tnl_ops,
			     NULL, mutable);
	if (err)
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
				       (net_random() % (MAX_CACHE_EXP / 2));
#endif

	rcu_assign_pointer(tnl_vport->mutable, mutable);

	port_table_add_port(vport);
	return vport;

error_free_mutable:
	free_mutable_rtnl(mutable);
	kfree(mutable);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;
	struct tnl_mutable_config *mutable;
	int err;

	old_mutable = rtnl_dereference(tnl_vport->mutable);
	if (!old_mutable->key.daddr)
		return -EINVAL;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	/* Copy fields whose values should be retained. */
	mutable->seq = old_mutable->seq + 1;
	memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

	/* Parse the others configured by userspace. */
	err = tnl_set_config(ovs_dp_get_net(vport->dp), options, tnl_vport->tnl_ops,
			     vport, mutable);
	if (err)
		goto error_free;

	if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
		port_table_move_port(vport, mutable);
	else
		assign_config_rcu(vport, mutable);

	return 0;

error_free:
	free_mutable_rtnl(mutable);
	kfree(mutable);
error:
	return err;
}
int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	if (nla_put_u32(skb, OVS_TUNNEL_ATTR_FLAGS,
			mutable->flags & TNL_F_PUBLIC) ||
	    nla_put_be32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr))
		goto nla_put_failure;

	if (!(mutable->flags & TNL_F_IN_KEY_MATCH) &&
	    nla_put_be64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key))
		goto nla_put_failure;
	if (!(mutable->flags & TNL_F_OUT_KEY_ACTION) &&
	    nla_put_be64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key))
		goto nla_put_failure;
	if (mutable->key.saddr &&
	    nla_put_be32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr))
		goto nla_put_failure;
	if (mutable->tos && nla_put_u8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos))
		goto nla_put_failure;
	if (mutable->ttl && nla_put_u8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu,
						   struct tnl_vport, rcu);

	free_cache((struct tnl_cache __force *)tnl_vport->cache);
	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
	ovs_vport_free(tnl_vport_to_vport(tnl_vport));
}
void ovs_tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = rtnl_dereference(tnl_vport->mutable);
	port_table_remove_port(vport);
	free_mutable_rtnl(mutable);
	call_rcu(&tnl_vport->rcu, free_port_rcu);
}
int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_mutable, *mutable;

	old_mutable = rtnl_dereference(tnl_vport->mutable);
	mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	old_mutable->mlink = 0;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}
const char *ovs_tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;
}

const unsigned char *ovs_tnl_get_addr(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}
void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
{
	while (skb) {
		struct sk_buff *next = skb->next;
		kfree_skb(skb);
		skb = next;
	}
}
int ovs_tnl_init(void)
{
	int i;

	port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
			     GFP_KERNEL);
	if (!port_table)
		return -ENOMEM;

	for (i = 0; i < PORT_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&port_table[i]);

	return 0;
}

void ovs_tnl_exit(void)
{
	kfree(port_table);
}