X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Ftunnel.c;h=d651c11833885a573cc743d1a08bc6d2a40e001e;hb=5ca92d1d5dc0d8dba9ed554444cd0ae00a43209f;hp=100794e0d524b86e2c7c2cb4a2fb4b7589ddb8fa;hpb=f686a33af8cb41ee228e6a35410c9a488fba3eb1;p=openvswitch diff --git a/datapath/tunnel.c b/datapath/tunnel.c index 100794e0..d651c118 100644 --- a/datapath/tunnel.c +++ b/datapath/tunnel.c @@ -1,17 +1,31 @@ /* - * Copyright (c) 2010, 2011 Nicira Networks. - * Distributed under the terms of the GNU GPL version 2. + * Copyright (c) 2007-2012 Nicira, Inc. * - * Significant portions of this file may be copied from parts of the Linux - * kernel, by Linus Torvalds and others. + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include #include +#include #include #include +#include #include #include #include @@ -30,7 +44,6 @@ #include #include -#include "actions.h" #include "checksum.h" #include "datapath.h" #include "tunnel.h" @@ -84,8 +97,10 @@ static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner); */ static unsigned int key_local_remote_ports __read_mostly; static unsigned int key_remote_ports __read_mostly; +static unsigned int key_multicast_ports __read_mostly; static unsigned int local_remote_ports __read_mostly; static unsigned int remote_ports __read_mostly; +static unsigned int multicast_ports __read_mostly; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) #define rt_dst(rt) (rt->dst) @@ -93,7 +108,20 @@ static unsigned int remote_ports __read_mostly; #define rt_dst(rt) (rt->u.dst) #endif -static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) +static struct hh_cache *rt_hh(struct rtable *rt) +{ + struct neighbour *neigh = dst_get_neighbour_noref(&rt->dst); + if (!neigh || !(neigh->nud_state & NUD_CONNECTED) || + !neigh->hh.hh_len) + return NULL; + return &neigh->hh; +} +#else +#define rt_hh(rt) (rt_dst(rt).hh) +#endif + +static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport) { return vport_from_priv(tnl_vport); } @@ -101,13 +129,13 @@ static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport /* This is analogous to rtnl_dereference for the tunnel cache. It checks that * cache_lock is held, so it is only for update side code. 
*/ -static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport) +static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport) { return rcu_dereference_protected(tnl_vport->cache, - lockdep_is_held(&tnl_vport->cache_lock)); + lockdep_is_held(&tnl_vport->cache_lock)); } -static inline void schedule_cache_cleaner(void) +static void schedule_cache_cleaner(void) { schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL); } @@ -117,7 +145,7 @@ static void free_cache(struct tnl_cache *cache) if (!cache) return; - flow_put(cache->flow); + ovs_flow_put(cache->flow); ip_rt_put(cache->rt); kfree(cache); } @@ -134,6 +162,21 @@ static void free_cache_rcu(struct rcu_head *rcu) free_cache(c); } +/* Frees the portion of 'mutable' that requires RTNL and thus can't happen + * within an RCU callback. Fortunately this part doesn't require waiting for + * an RCU grace period. + */ +static void free_mutable_rtnl(struct tnl_mutable_config *mutable) +{ + ASSERT_RTNL(); + if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) { + struct in_device *in_dev; + in_dev = inetdev_by_index(port_key_get_net(&mutable->key), mutable->mlink); + if (in_dev) + ip_mc_dec_group(in_dev, mutable->key.daddr); + } +} + static void assign_config_rcu(struct vport *vport, struct tnl_mutable_config *new_config) { @@ -142,6 +185,8 @@ static void assign_config_rcu(struct vport *vport, old_config = rtnl_dereference(tnl_vport->mutable); rcu_assign_pointer(tnl_vport->mutable, new_config); + + free_mutable_rtnl(old_config); call_rcu(&old_config->rcu, free_config_rcu); } @@ -159,14 +204,20 @@ static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache) static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable) { + bool is_multicast = ipv4_is_multicast(mutable->key.daddr); + if (mutable->flags & TNL_F_IN_KEY_MATCH) { if (mutable->key.saddr) return &local_remote_ports; + else if (is_multicast) + return &multicast_ports; else return &remote_ports; } else { if (mutable->key.saddr) return &key_local_remote_ports; + else if (is_multicast) + return &key_multicast_ports; else return &key_remote_ports; } @@ -174,10 +225,10 @@ static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable) static u32 port_hash(const struct port_lookup_key *key) { - return jhash2((u32*)key, (sizeof(*key) / sizeof(u32)), 0); + return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0); } -static inline struct hlist_head *find_bucket(u32 hash) +static struct hlist_head *find_bucket(u32 hash) { return &port_table[(hash & (PORT_TABLE_SIZE - 1))]; } @@ -227,13 +278,13 @@ static void port_table_remove_port(struct vport *vport) (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--; } -static struct tnl_vport *port_table_lookup(struct port_lookup_key *key, - const struct tnl_mutable_config **pmutable) +static struct vport *port_table_lookup(struct port_lookup_key *key, + const struct tnl_mutable_config **pmutable) { struct hlist_node *n; struct hlist_head *bucket; u32 hash = port_hash(key); - struct tnl_vport * tnl_vport; + struct tnl_vport *tnl_vport; bucket = find_bucket(hash); @@ -241,60 +292,75 @@ static struct tnl_vport *port_table_lookup(struct port_lookup_key *key, struct tnl_mutable_config *mutable; mutable = rcu_dereference_rtnl(tnl_vport->mutable); - if (!memcmp(&mutable->key, key, sizeof(*key))) { + if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) { *pmutable = mutable; - return tnl_vport; + return tnl_vport_to_vport(tnl_vport); } } return NULL; } -struct vport 
*tnl_find_port(__be32 saddr, __be32 daddr, __be64 key, - int tunnel_type, - const struct tnl_mutable_config **mutable) +struct vport *ovs_tnl_find_port(struct net *net, __be32 saddr, __be32 daddr, + __be64 key, int tunnel_type, + const struct tnl_mutable_config **mutable) { struct port_lookup_key lookup; - struct tnl_vport * tnl_vport; + struct vport *vport; + bool is_multicast = ipv4_is_multicast(saddr); + port_key_set_net(&lookup, net); lookup.saddr = saddr; lookup.daddr = daddr; - if (tunnel_type & TNL_T_KEY_EXACT) { - lookup.in_key = key; - lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH; - - if (key_local_remote_ports) { - tnl_vport = port_table_lookup(&lookup, mutable); - if (tnl_vport) - return tnl_vport_to_vport(tnl_vport); - } - - if (key_remote_ports) { - lookup.saddr = 0; - tnl_vport = port_table_lookup(&lookup, mutable); - if (tnl_vport) - return tnl_vport_to_vport(tnl_vport); + /* First try for exact match on in_key. */ + lookup.in_key = key; + lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT; + if (!is_multicast && key_local_remote_ports) { + vport = port_table_lookup(&lookup, mutable); + if (vport) + return vport; + } + if (key_remote_ports) { + lookup.saddr = 0; + vport = port_table_lookup(&lookup, mutable); + if (vport) + return vport; - lookup.saddr = saddr; - } + lookup.saddr = saddr; } - if (tunnel_type & TNL_T_KEY_MATCH) { - lookup.in_key = 0; - lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT; + /* Then try matches that wildcard in_key. */ + lookup.in_key = 0; + lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH; + if (!is_multicast && local_remote_ports) { + vport = port_table_lookup(&lookup, mutable); + if (vport) + return vport; + } + if (remote_ports) { + lookup.saddr = 0; + vport = port_table_lookup(&lookup, mutable); + if (vport) + return vport; + } - if (local_remote_ports) { - tnl_vport = port_table_lookup(&lookup, mutable); - if (tnl_vport) - return tnl_vport_to_vport(tnl_vport); + if (is_multicast) { + lookup.saddr = 0; + lookup.daddr = saddr; + if (key_multicast_ports) { + lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT; + lookup.in_key = key; + vport = port_table_lookup(&lookup, mutable); + if (vport) + return vport; } - - if (remote_ports) { - lookup.saddr = 0; - tnl_vport = port_table_lookup(&lookup, mutable); - if (tnl_vport) - return tnl_vport_to_vport(tnl_vport); + if (multicast_ports) { + lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH; + lookup.in_key = 0; + vport = port_table_lookup(&lookup, mutable); + if (vport) + return vport; } } @@ -336,7 +402,7 @@ static void ecn_decapsulate(struct sk_buff *skb, u8 tos) } /** - * tnl_rcv - ingress point for generic tunnel code + * ovs_tnl_rcv - ingress point for generic tunnel code * * @vport: port this packet was received on * @skb: received packet @@ -350,7 +416,7 @@ static void ecn_decapsulate(struct sk_buff *skb, u8 tos) * - skb->csum does not include the inner Ethernet header. * - The layer pointers are undefined. */ -void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos) +void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos) { struct ethhdr *eh; @@ -375,7 +441,7 @@ void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos) return; } - vport_receive(vport, skb); + ovs_vport_receive(vport, skb); } static bool check_ipv4_address(__be32 addr) @@ -475,6 +541,7 @@ static bool ipv6_should_icmp(struct sk_buff *skb) int addr_type; int payload_off = (u8 *)(old_ipv6h + 1) - skb->data; u8 nexthdr = ipv6_hdr(skb)->nexthdr; + __be16 frag_off; /* Check source address is valid. 
*/ addr_type = ipv6_addr_type(&old_ipv6h->saddr); @@ -486,7 +553,7 @@ static bool ipv6_should_icmp(struct sk_buff *skb) return false; /* Don't respond to ICMP error messages. */ - payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr); + payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr, &frag_off); if (payload_off < 0) return false; @@ -524,8 +591,8 @@ static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb, + payload_length); ipv6h->nexthdr = NEXTHDR_ICMP; ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT; - ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr); - ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr); + ipv6h->daddr = old_ipv6h->saddr; + ipv6h->saddr = old_ipv6h->daddr; /* ICMPv6 */ icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG; @@ -544,8 +611,9 @@ static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb, } #endif /* IPv6 */ -bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable, - struct sk_buff *skb, unsigned int mtu, __be64 flow_key) +bool ovs_tnl_frag_needed(struct vport *vport, + const struct tnl_mutable_config *mutable, + struct sk_buff *skb, unsigned int mtu, __be64 flow_key) { unsigned int eth_hdr_len = ETH_HLEN; unsigned int total_length = 0, header_length = 0, payload_length; @@ -645,7 +713,7 @@ bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutab return false; } - vport_receive(vport, nskb); + ovs_vport_receive(vport, nskb); return true; } @@ -692,8 +760,8 @@ static bool check_mtu(struct sk_buff *skb, mtu = max(mtu, IP_MIN_MTU); if (packet_length > mtu && - tnl_frag_needed(vport, mutable, skb, mtu, - OVS_CB(skb)->tun_id)) + ovs_tnl_frag_needed(vport, mutable, skb, mtu, + OVS_CB(skb)->tun_id)) return false; } } @@ -709,8 +777,8 @@ static bool check_mtu(struct sk_buff *skb, mtu = max(mtu, IPV6_MIN_MTU); if (packet_length > mtu && - tnl_frag_needed(vport, mutable, skb, mtu, - OVS_CB(skb)->tun_id)) + ovs_tnl_frag_needed(vport, mutable, skb, mtu, + OVS_CB(skb)->tun_id)) return false; } } @@ -741,26 +809,39 @@ static void create_tunnel_header(const struct vport *vport, tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1); } -static inline void *get_cached_header(const struct tnl_cache *cache) +static void *get_cached_header(const struct tnl_cache *cache) { return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN); } -static inline bool check_cache_valid(const struct tnl_cache *cache, - const struct tnl_mutable_config *mutable) +#ifdef HAVE_RT_GENID +static inline int rt_genid(struct net *net) +{ + return atomic_read(&net->ipv4.rt_genid); +} +#endif + +static bool check_cache_valid(const struct tnl_cache *cache, + const struct tnl_mutable_config *mutable) { - return cache && + struct hh_cache *hh; + + if (!cache) + return false; + + hh = rt_hh(cache->rt); + return hh && #ifdef NEED_CACHE_TIMEOUT time_before(jiffies, cache->expiration) && #endif #ifdef HAVE_RT_GENID - atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid && + rt_genid(dev_net(rt_dst(cache->rt).dev)) == cache->rt->rt_genid && #endif #ifdef HAVE_HH_SEQ - rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq && + hh->hh_lock.sequence == cache->hh_seq && #endif mutable->seq == cache->mutable_seq && - (!is_internal_dev(rt_dst(cache->rt).dev) || + (!ovs_is_internal_dev(rt_dst(cache->rt).dev) || (cache->flow && !cache->flow->dead)); } @@ -787,7 +868,7 @@ static void cache_cleaner(struct work_struct *work) for (i = 0; i < PORT_TABLE_SIZE; i++) { struct hlist_node *n; struct hlist_head *bucket; - struct tnl_vport 
*tnl_vport; + struct tnl_vport *tnl_vport; bucket = &port_table[i]; hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) @@ -796,26 +877,28 @@ static void cache_cleaner(struct work_struct *work) rcu_read_unlock(); } -static inline void create_eth_hdr(struct tnl_cache *cache, - const struct rtable *rt) +static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh) { void *cache_data = get_cached_header(cache); - int hh_len = rt_dst(rt).hh->hh_len; - int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len; + int hh_off; #ifdef HAVE_HH_SEQ unsigned hh_seq; do { - hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock); - memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len); - } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq)); + hh_seq = read_seqbegin(&hh->hh_lock); + hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len; + memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len); + cache->hh_len = hh->hh_len; + } while (read_seqretry(&hh->hh_lock, hh_seq)); cache->hh_seq = hh_seq; #else - read_lock_bh(&rt_dst(rt).hh->hh_lock); - memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len); - read_unlock_bh(&rt_dst(rt).hh->hh_lock); + read_lock(&hh->hh_lock); + hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len; + memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len); + cache->hh_len = hh->hh_len; + read_unlock(&hh->hh_lock); #endif } @@ -827,6 +910,7 @@ static struct tnl_cache *build_cache(struct vport *vport, struct tnl_cache *cache; void *cache_data; int cache_len; + struct hh_cache *hh; if (!(mutable->flags & TNL_F_HDR_CACHE)) return NULL; @@ -835,14 +919,16 @@ static struct tnl_cache *build_cache(struct vport *vport, * If there is no entry in the ARP cache or if this device does not * support hard header caching just fall back to the IP stack. */ - if (!rt_dst(rt).hh) + + hh = rt_hh(rt); + if (!hh) return NULL; /* * If lock is contended fall back to directly building the header. * We're not going to help performance by sitting here spinning. 
*/ - if (!spin_trylock_bh(&tnl_vport->cache_lock)) + if (!spin_trylock(&tnl_vport->cache_lock)) return NULL; cache = cache_dereference(tnl_vport); @@ -851,17 +937,16 @@ static struct tnl_cache *build_cache(struct vport *vport, else cache = NULL; - cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen; + cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen; cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) + cache_len, GFP_ATOMIC); if (!cache) goto unlock; - cache->len = cache_len; - - create_eth_hdr(cache, rt); - cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len; + create_eth_hdr(cache, hh); + cache_data = get_cached_header(cache) + cache->hh_len; + cache->len = cache->hh_len + mutable->tunnel_hlen; create_tunnel_header(vport, mutable, rt, cache_data); @@ -871,16 +956,15 @@ static struct tnl_cache *build_cache(struct vport *vport, cache->expiration = jiffies + tnl_vport->cache_exp_interval; #endif - if (is_internal_dev(rt_dst(rt).dev)) { + if (ovs_is_internal_dev(rt_dst(rt).dev)) { struct sw_flow_key flow_key; struct vport *dst_vport; struct sk_buff *skb; - bool is_frag; int err; int flow_key_len; struct sw_flow *flow; - dst_vport = internal_dev_get_vport(rt_dst(rt).dev); + dst_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev); if (!dst_vport) goto done; @@ -891,18 +975,18 @@ static struct tnl_cache *build_cache(struct vport *vport, __skb_put(skb, cache->len); memcpy(skb->data, get_cached_header(cache), cache->len); - err = flow_extract(skb, dst_vport->port_no, &flow_key, - &flow_key_len, &is_frag); + err = ovs_flow_extract(skb, dst_vport->port_no, &flow_key, + &flow_key_len); consume_skb(skb); - if (err || is_frag) + if (err) goto done; - flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table), - &flow_key, flow_key_len); + flow = ovs_flow_tbl_lookup(rcu_dereference(dst_vport->dp->table), + &flow_key, flow_key_len); if (flow) { cache->flow = flow; - flow_hold(flow); + ovs_flow_hold(flow); } } @@ -910,11 +994,39 @@ done: assign_cache_rcu(vport, cache); unlock: - spin_unlock_bh(&tnl_vport->cache_lock); + spin_unlock(&tnl_vport->cache_lock); return cache; } +static struct rtable *__find_route(const struct tnl_mutable_config *mutable, + u8 ipproto, u8 tos) +{ + /* Tunnel configuration keeps DSCP part of TOS bits, But Linux + * router expect RT_TOS bits only. 
*/ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) + struct flowi fl = { .nl_u = { .ip4_u = { + .daddr = mutable->key.daddr, + .saddr = mutable->key.saddr, + .tos = RT_TOS(tos) } }, + .proto = ipproto }; + struct rtable *rt; + + if (unlikely(ip_route_output_key(port_key_get_net(&mutable->key), &rt, &fl))) + return ERR_PTR(-EADDRNOTAVAIL); + + return rt; +#else + struct flowi4 fl = { .daddr = mutable->key.daddr, + .saddr = mutable->key.saddr, + .flowi4_tos = RT_TOS(tos), + .flowi4_proto = ipproto }; + + return ip_route_output_key(port_key_get_net(&mutable->key), &fl); +#endif +} + static struct rtable *find_route(struct vport *vport, const struct tnl_mutable_config *mutable, u8 tos, struct tnl_cache **cache) @@ -925,39 +1037,25 @@ static struct rtable *find_route(struct vport *vport, *cache = NULL; tos = RT_TOS(tos); - if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) { + if (likely(tos == RT_TOS(mutable->tos) && + check_cache_valid(cur_cache, mutable))) { *cache = cur_cache; return cur_cache->rt; } else { struct rtable *rt; -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) - struct flowi fl = { .nl_u = { .ip4_u = - { .daddr = mutable->key.daddr, - .saddr = mutable->key.saddr, - .tos = tos } }, - .proto = tnl_vport->tnl_ops->ipproto }; - - if (unlikely(ip_route_output_key(&init_net, &rt, &fl))) - return NULL; -#else - struct flowi4 fl = { .daddr = mutable->key.daddr, - .saddr = mutable->key.saddr, - .flowi4_tos = tos, - .flowi4_proto = tnl_vport->tnl_ops->ipproto }; - rt = ip_route_output_key(&init_net, &fl); + rt = __find_route(mutable, tnl_vport->tnl_ops->ipproto, tos); if (IS_ERR(rt)) return NULL; -#endif - if (likely(tos == mutable->tos)) + if (likely(tos == RT_TOS(mutable->tos))) *cache = build_cache(vport, mutable, rt); return rt; } } -static inline bool need_linearize(const struct sk_buff *skb) +static bool need_linearize(const struct sk_buff *skb) { int i; @@ -970,7 +1068,7 @@ static inline bool need_linearize(const struct sk_buff *skb) * change them from underneath us and we can skip the linearization. */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) - if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1)) + if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1)) return true; return false; @@ -1067,11 +1165,11 @@ free_frags: * dropped so just free the rest. This may help improve the congestion * that caused the first packet to be dropped. 
*/ - tnl_free_linked_skbs(skb); + ovs_tnl_free_linked_skbs(skb); return sent_len; } -int tnl_send(struct vport *vport, struct sk_buff *skb) +int ovs_tnl_send(struct vport *vport, struct sk_buff *skb) { struct tnl_vport *tnl_vport = tnl_vport_priv(vport); const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable); @@ -1124,8 +1222,6 @@ int tnl_send(struct vport *vport, struct sk_buff *skb) else tos = mutable->tos; - tos = INET_ECN_encapsulate(tos, inner_tos); - /* Route lookup */ rt = find_route(vport, mutable, tos, &cache); if (unlikely(!rt)) @@ -1133,6 +1229,8 @@ int tnl_send(struct vport *vport, struct sk_buff *skb) if (unlikely(!cache)) unattached_dst = &rt_dst(rt); + tos = INET_ECN_encapsulate(tos, inner_tos); + /* Reset SKB */ nf_reset(skb); secpath_reset(skb); @@ -1187,7 +1285,7 @@ int tnl_send(struct vport *vport, struct sk_buff *skb) skb_push(skb, cache->len); memcpy(skb->data, get_cached_header(cache), cache->len); skb_reset_mac_header(skb); - skb_set_network_header(skb, rt_dst(rt).hh->hh_len); + skb_set_network_header(skb, cache->hh_len); } else { skb_push(skb, mutable->tunnel_hlen); @@ -1209,14 +1307,16 @@ int tnl_send(struct vport *vport, struct sk_buff *skb) iph->frag_off = frag_off; ip_select_ident(iph, &rt_dst(rt), NULL); - skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb); + skb = tnl_vport->tnl_ops->update_header(vport, mutable, + &rt_dst(rt), skb); if (unlikely(!skb)) goto next; if (likely(cache)) { int orig_len = skb->len - cache->len; - struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev); + struct vport *cache_vport; + cache_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev); skb->protocol = htons(ETH_P_IP); iph = ip_hdr(skb); iph->tot_len = htons(skb->len - skb_network_offset(skb)); @@ -1229,7 +1329,7 @@ int tnl_send(struct vport *vport, struct sk_buff *skb) } OVS_CB(skb)->flow = cache->flow; - vport_receive(cache_vport, skb); + ovs_vport_receive(cache_vport, skb); sent_len += orig_len; } else { int xmit_err; @@ -1248,14 +1348,14 @@ next: } if (unlikely(sent_len == 0)) - vport_record_error(vport, VPORT_E_TX_DROPPED); + ovs_vport_record_error(vport, VPORT_E_TX_DROPPED); goto out; error_free: - tnl_free_linked_skbs(skb); + ovs_tnl_free_linked_skbs(skb); error: - vport_record_error(vport, err); + ovs_vport_record_error(vport, err); out: dst_release(unattached_dst); return sent_len; @@ -1271,8 +1371,10 @@ static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = { [OVS_TUNNEL_ATTR_TTL] = { .type = NLA_U8 }, }; -/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */ -static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops, +/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be + * zeroed. 
*/ +static int tnl_set_config(struct net *net, struct nlattr *options, + const struct tnl_ops *tnl_ops, const struct vport *cur_vport, struct tnl_mutable_config *mutable) { @@ -1293,13 +1395,18 @@ static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops, mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC; - if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) - mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]); + port_key_set_net(&mutable->key, net); mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]); + if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) { + if (ipv4_is_multicast(mutable->key.daddr)) + return -EINVAL; + mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]); + } if (a[OVS_TUNNEL_ATTR_TOS]) { mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]); - if (mutable->tos != RT_TOS(mutable->tos)) + /* Reject ToS config with ECN bits set. */ + if (mutable->tos & INET_ECN_MASK) return -EINVAL; } @@ -1326,19 +1433,32 @@ static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops, mutable->tunnel_hlen += sizeof(struct iphdr); - old_vport = tnl_find_port(mutable->key.saddr, mutable->key.daddr, - mutable->key.in_key, mutable->key.tunnel_type, - &old_mutable); - + old_vport = port_table_lookup(&mutable->key, &old_mutable); if (old_vport && old_vport != cur_vport) return -EEXIST; + mutable->mlink = 0; + if (ipv4_is_multicast(mutable->key.daddr)) { + struct net_device *dev; + struct rtable *rt; + + rt = __find_route(mutable, tnl_ops->ipproto, mutable->tos); + if (IS_ERR(rt)) + return -EADDRNOTAVAIL; + dev = rt_dst(rt).dev; + ip_rt_put(rt); + if (__in_dev_get_rtnl(dev) == NULL) + return -EADDRNOTAVAIL; + mutable->mlink = dev->ifindex; + ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr); + } + return 0; } -struct vport *tnl_create(const struct vport_parms *parms, - const struct vport_ops *vport_ops, - const struct tnl_ops *tnl_ops) +struct vport *ovs_tnl_create(const struct vport_parms *parms, + const struct vport_ops *vport_ops, + const struct tnl_ops *tnl_ops) { struct vport *vport; struct tnl_vport *tnl_vport; @@ -1346,7 +1466,7 @@ struct vport *tnl_create(const struct vport_parms *parms, int initial_frag_id; int err; - vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms); + vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms); if (IS_ERR(vport)) { err = PTR_ERR(vport); goto error; @@ -1363,12 +1483,13 @@ struct vport *tnl_create(const struct vport_parms *parms, goto error_free_vport; } - vport_gen_rand_ether_addr(mutable->eth_addr); + random_ether_addr(mutable->eth_addr); get_random_bytes(&initial_frag_id, sizeof(int)); atomic_set(&tnl_vport->frag_id, initial_frag_id); - err = tnl_set_config(parms->options, tnl_ops, NULL, mutable); + err = tnl_set_config(ovs_dp_get_net(parms->dp), parms->options, tnl_ops, + NULL, mutable); if (err) goto error_free_mutable; @@ -1385,14 +1506,15 @@ struct vport *tnl_create(const struct vport_parms *parms, return vport; error_free_mutable: + free_mutable_rtnl(mutable); kfree(mutable); error_free_vport: - vport_free(vport); + ovs_vport_free(vport); error: return ERR_PTR(err); } -int tnl_set_options(struct vport *vport, struct nlattr *options) +int ovs_tnl_set_options(struct vport *vport, struct nlattr *options) { struct tnl_vport *tnl_vport = tnl_vport_priv(vport); const struct tnl_mutable_config *old_mutable; @@ -1411,39 +1533,48 @@ int tnl_set_options(struct vport *vport, struct nlattr *options) memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN); /* Parse the 
others configured by userspace. */ - err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable); + err = tnl_set_config(ovs_dp_get_net(vport->dp), options, tnl_vport->tnl_ops, + vport, mutable); if (err) goto error_free; if (port_hash(&mutable->key) != port_hash(&old_mutable->key)) port_table_move_port(vport, mutable); + else + assign_config_rcu(vport, mutable); return 0; error_free: + free_mutable_rtnl(mutable); kfree(mutable); error: return err; } -int tnl_get_options(const struct vport *vport, struct sk_buff *skb) +int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb) { const struct tnl_vport *tnl_vport = tnl_vport_priv(vport); const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable); - NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC); - NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr); - - if (!(mutable->flags & TNL_F_IN_KEY_MATCH)) - NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key); - if (!(mutable->flags & TNL_F_OUT_KEY_ACTION)) - NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key); - if (mutable->key.saddr) - NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr); - if (mutable->tos) - NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos); - if (mutable->ttl) - NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl); + if (nla_put_u32(skb, OVS_TUNNEL_ATTR_FLAGS, + mutable->flags & TNL_F_PUBLIC) || + nla_put_be32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr)) + goto nla_put_failure; + + if (!(mutable->flags & TNL_F_IN_KEY_MATCH) && + nla_put_be64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key)) + goto nla_put_failure; + if (!(mutable->flags & TNL_F_OUT_KEY_ACTION) && + nla_put_be64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key)) + goto nla_put_failure; + if (mutable->key.saddr && + nla_put_be32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr)) + goto nla_put_failure; + if (mutable->tos && nla_put_u8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos)) + goto nla_put_failure; + if (mutable->ttl && nla_put_u8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl)) + goto nla_put_failure; return 0; @@ -1458,48 +1589,51 @@ static void free_port_rcu(struct rcu_head *rcu) free_cache((struct tnl_cache __force *)tnl_vport->cache); kfree((struct tnl_mutable __force *)tnl_vport->mutable); - vport_free(tnl_vport_to_vport(tnl_vport)); + ovs_vport_free(tnl_vport_to_vport(tnl_vport)); } -void tnl_destroy(struct vport *vport) +void ovs_tnl_destroy(struct vport *vport) { struct tnl_vport *tnl_vport = tnl_vport_priv(vport); - const struct tnl_mutable_config *mutable; + struct tnl_mutable_config *mutable; mutable = rtnl_dereference(tnl_vport->mutable); port_table_remove_port(vport); + free_mutable_rtnl(mutable); call_rcu(&tnl_vport->rcu, free_port_rcu); } -int tnl_set_addr(struct vport *vport, const unsigned char *addr) +int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr) { struct tnl_vport *tnl_vport = tnl_vport_priv(vport); - struct tnl_mutable_config *mutable; + struct tnl_mutable_config *old_mutable, *mutable; - mutable = kmemdup(rtnl_dereference(tnl_vport->mutable), - sizeof(struct tnl_mutable_config), GFP_KERNEL); + old_mutable = rtnl_dereference(tnl_vport->mutable); + mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL); if (!mutable) return -ENOMEM; + old_mutable->mlink = 0; + memcpy(mutable->eth_addr, addr, ETH_ALEN); assign_config_rcu(vport, mutable); return 0; } -const char *tnl_get_name(const struct vport *vport) +const char *ovs_tnl_get_name(const 
struct vport *vport) { const struct tnl_vport *tnl_vport = tnl_vport_priv(vport); return tnl_vport->name; } -const unsigned char *tnl_get_addr(const struct vport *vport) +const unsigned char *ovs_tnl_get_addr(const struct vport *vport) { const struct tnl_vport *tnl_vport = tnl_vport_priv(vport); return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr; } -void tnl_free_linked_skbs(struct sk_buff *skb) +void ovs_tnl_free_linked_skbs(struct sk_buff *skb) { while (skb) { struct sk_buff *next = skb->next; @@ -1508,12 +1642,12 @@ void tnl_free_linked_skbs(struct sk_buff *skb) } } -int tnl_init(void) +int ovs_tnl_init(void) { int i; port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head *), - GFP_KERNEL); + GFP_KERNEL); if (!port_table) return -ENOMEM; @@ -1523,21 +1657,7 @@ int tnl_init(void) return 0; } -void tnl_exit(void) +void ovs_tnl_exit(void) { - int i; - - for (i = 0; i < PORT_TABLE_SIZE; i++) { - struct tnl_vport * tnl_vport; - struct hlist_head *hash_head; - struct hlist_node *n; - - hash_head = &port_table[i]; - hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) { - BUG(); - goto out; - } - } -out: kfree(port_table); }
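
Note on the ToS handling in the tnl_set_config() hunk above: the patch replaces the old validity test (mutable->tos != RT_TOS(mutable->tos)) with a check that rejects only the ECN bits (mutable->tos & INET_ECN_MASK). The following minimal userspace sketch is not part of the patch; it restates the standard Linux constants (IPTOS_TOS_MASK 0x1e, RT_TOS(), INET_ECN_MASK 0x03) only so it compiles on its own, and contrasts which ToS values each check accepts.

/*
 * Standalone sketch (userspace, not kernel code) contrasting the old and
 * new ToS validation used by tnl_set_config().  The constants mirror the
 * usual Linux definitions and are restated here for self-containment.
 */
#include <stdio.h>

#define IPTOS_TOS_MASK 0x1E                     /* legacy 4-bit ToS field */
#define RT_TOS(tos)    ((tos) & IPTOS_TOS_MASK)
#define INET_ECN_MASK  0x03                     /* low two bits carry ECN */

/* Old check: any bit outside the legacy ToS field (including the DSCP
 * precedence bits 0xE0) made the configuration invalid. */
static int old_check_rejects(unsigned char tos)
{
	return tos != RT_TOS(tos);
}

/* New check: only ToS values with ECN bits set are rejected. */
static int new_check_rejects(unsigned char tos)
{
	return (tos & INET_ECN_MASK) != 0;
}

int main(void)
{
	unsigned char samples[] = { 0x00, 0x10, 0xB8 /* DSCP EF */, 0x03 /* ECN bits */ };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("tos=0x%02x  old:%s  new:%s\n", samples[i],
		       old_check_rejects(samples[i]) ? "reject" : "accept",
		       new_check_rejects(samples[i]) ? "reject" : "accept");
	return 0;
}

Under the new check, full DSCP codepoints such as 0xB8 (EF) pass, while any ToS value carrying ECN bits is still refused; the old check also rejected the high precedence bits, which the patch deliberately allows.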