#include "vport-generic.h"
#include "vport-internal_dev.h"
-#ifdef NEED_CACHE_TIMEOUT
-/*
- * On kernels where we can't quickly detect changes in the rest of the system,
- * we use an expiration time to invalidate the cache. A shorter expiration
- * reduces the length of time during which we may blackhole packets, while a
- * longer one improves performance by reducing how often the cache needs to
- * be rebuilt. A variety of factors may cause the cache to be invalidated
- * before the expiration time, but this is the maximum. The time is expressed
- * in jiffies.
- */
-#define MAX_CACHE_EXP HZ
-#endif
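/*
 * Illustrative sketch, not part of this change (names are hypothetical,
 * assumes <linux/jiffies.h>): the expiration described above is recorded
 * when the cache is built and later checked with time_before(), which
 * stays correct across jiffies wraparound.
 */
struct example_cache_entry {
	unsigned long expiration;	/* Absolute time, in jiffies. */
};

static void example_entry_built(struct example_cache_entry *e)
{
	e->expiration = jiffies + MAX_CACHE_EXP;
}

static bool example_entry_valid(const struct example_cache_entry *e)
{
	/* time_before() copes with the jiffies counter wrapping around. */
	return time_before(jiffies, e->expiration);
}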
-
-/*
- * Interval to check for and remove caches that are no longer valid. Caches
- * are checked for validity before they are used for packet encapsulation and
- * old caches are removed at that time. However, if no packets are sent through
- * the tunnel then the cache will never be destroyed. Since it holds
- * references to a number of system objects, the cache will continue to use
- * system resources by not allowing those objects to be destroyed. The cache
- * cleaner is periodically run to free invalid caches. It does not
- * significantly affect system performance. A lower interval will release
- * resources faster but will itself consume resources by requiring more frequent
- * checks. A longer interval may result in messages about unreleased resources
- * being printed to the kernel message buffer. The interval is expressed in
- * jiffies.
- */
-#define CACHE_CLEANER_INTERVAL (5 * HZ)
-
-#define CACHE_DATA_ALIGN 16
#define PORT_TABLE_SIZE 1024
static struct hlist_head *port_table __read_mostly;
-static int port_table_count;
-
-static void cache_cleaner(struct work_struct *work);
-static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
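/*
 * Illustrative sketch, not part of this change (hypothetical names): the
 * cleaner declared above uses the standard self-rearming delayed-work
 * pattern from <linux/workqueue.h>; the handler re-schedules itself and
 * teardown cancels it synchronously so no handler is left running.
 */
static void example_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_cleaner_wq, example_cleaner);

static void example_cleaner(struct work_struct *work)
{
	/* Re-arm first so the period stays roughly constant. */
	schedule_delayed_work(&example_cleaner_wq, CACHE_CLEANER_INTERVAL);

	/* ... walk the table under rcu_read_lock() and drop stale entries ... */
}

static void example_cleaner_stop(void)
{
	cancel_delayed_work_sync(&example_cleaner_wq);
}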
/*
* These are just used as an optimization: they don't require any kind of
#define rt_dst(rt) (rt->u.dst)
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
-static struct hh_cache *rt_hh(struct rtable *rt)
-{
- struct neighbour *neigh = dst_get_neighbour_noref(&rt->dst);
- if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
- !neigh->hh.hh_len)
- return NULL;
- return &neigh->hh;
-}
-#else
-#define rt_hh(rt) (rt_dst(rt).hh)
-#endif
-
static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
return vport_from_priv(tnl_vport);
}
-/* This is analogous to rtnl_dereference for the tunnel cache. It checks that
- * cache_lock is held, so it is only for update-side code.
- */
-static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
-{
- return rcu_dereference_protected(tnl_vport->cache,
- lockdep_is_held(&tnl_vport->cache_lock));
-}
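/*
 * Illustrative usage sketch, not part of this change (function names are
 * hypothetical): cache_dereference() is for the update side, which holds
 * cache_lock; readers instead use rcu_dereference() inside an RCU read-side
 * critical section.
 */
static void example_update_side(struct tnl_vport *tnl_vport)
{
	struct tnl_cache *cache;

	spin_lock_bh(&tnl_vport->cache_lock);
	cache = cache_dereference(tnl_vport);	/* Lockdep verifies the lock. */
	if (cache)
		rcu_assign_pointer(tnl_vport->cache, NULL);
	spin_unlock_bh(&tnl_vport->cache_lock);

	/* 'cache' would then be freed after a grace period via call_rcu(). */
}

static const struct tnl_cache *example_read_side(struct tnl_vport *tnl_vport)
{
	/* Caller must hold rcu_read_lock(); the pointer may go stale after
	 * rcu_read_unlock(). */
	return rcu_dereference(tnl_vport->cache);
}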
-
-static void schedule_cache_cleaner(void)
-{
- schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
-}
-
-static void free_cache(struct tnl_cache *cache)
-{
- if (!cache)
- return;
-
- ovs_flow_put(cache->flow);
- ip_rt_put(cache->rt);
- kfree(cache);
-}
-
static void free_config_rcu(struct rcu_head *rcu)
{
struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
kfree(c);
}
-static void free_cache_rcu(struct rcu_head *rcu)
-{
- struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
- free_cache(c);
-}
-
/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
* within an RCU callback. Fortunately this part doesn't require waiting for
* an RCU grace period.
call_rcu(&old_config->rcu, free_config_rcu);
}
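/*
 * Illustrative sketch, not part of this change (helper names assumed from
 * the comment above): replacing the config splits cleanup in two. The
 * RTNL-protected portion is released immediately, since RTNL is already
 * held, while the memory itself is freed from an RCU callback once no
 * reader can still see the old pointer.
 */
static void example_assign_config(struct tnl_vport *tnl_vport,
				  struct tnl_mutable_config *new_config)
{
	struct tnl_mutable_config *old_config;

	ASSERT_RTNL();
	old_config = rtnl_dereference(tnl_vport->mutable);

	rcu_assign_pointer(tnl_vport->mutable, new_config);

	free_mutable_rtnl(old_config);			/* RTNL-side pieces now. */
	call_rcu(&old_config->rcu, free_config_rcu);	/* kfree() after a grace period. */
}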
-static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_cache *old_cache;
-
- old_cache = cache_dereference(tnl_vport);
- rcu_assign_pointer(tnl_vport->cache, new_cache);
-
- if (old_cache)
- call_rcu(&old_cache->rcu, free_cache_rcu);
-}
-
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
bool is_multicast = ipv4_is_multicast(mutable->key.daddr);
const struct tnl_mutable_config *mutable;
u32 hash;
- if (port_table_count == 0)
- schedule_cache_cleaner();
-
mutable = rtnl_dereference(tnl_vport->mutable);
hash = port_hash(&mutable->key);
hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
- port_table_count++;
(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}
hlist_del_init_rcu(&tnl_vport->hash_node);
- port_table_count--;
- if (port_table_count == 0)
- cancel_delayed_work_sync(&cache_cleaner_wq);
-
(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}
return true;
}
-static void create_tunnel_header(const struct vport *vport,
- const struct tnl_mutable_config *mutable,
- const struct ovs_key_ipv4_tunnel *tun_key,
- const struct rtable *rt, void *header)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct iphdr *iph = header;
-
- iph->version = 4;
- iph->ihl = sizeof(struct iphdr) >> 2;
- iph->frag_off = htons(IP_DF);
- iph->protocol = tnl_vport->tnl_ops->ipproto;
- iph->tos = mutable->tos;
- iph->daddr = rt->rt_dst;
- iph->saddr = rt->rt_src;
- iph->ttl = mutable->ttl;
- if (!iph->ttl)
- iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
-
- tnl_vport->tnl_ops->build_header(vport, mutable, tun_key, iph + 1);
-}
-
-static void *get_cached_header(const struct tnl_cache *cache)
-{
- return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
-}
-
-#ifdef HAVE_RT_GENID
-static inline int rt_genid(struct net *net)
-{
- return atomic_read(&net->ipv4.rt_genid);
-}
-#endif
-
-static bool check_cache_valid(const struct tnl_cache *cache,
- const struct tnl_mutable_config *mutable)
-{
- struct hh_cache *hh;
-
- if (!cache)
- return false;
-
- hh = rt_hh(cache->rt);
- return hh &&
-#ifdef NEED_CACHE_TIMEOUT
- time_before(jiffies, cache->expiration) &&
-#endif
-#ifdef HAVE_RT_GENID
- rt_genid(dev_net(rt_dst(cache->rt).dev)) == cache->rt->rt_genid &&
-#endif
-#ifdef HAVE_HH_SEQ
- hh->hh_lock.sequence == cache->hh_seq &&
-#endif
- mutable->seq == cache->mutable_seq &&
- (!ovs_is_internal_dev(rt_dst(cache->rt).dev) ||
- (cache->flow && !cache->flow->dead));
-}
-
-static void __cache_cleaner(struct tnl_vport *tnl_vport)
-{
- const struct tnl_mutable_config *mutable =
- rcu_dereference(tnl_vport->mutable);
- const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
-
- if (cache && !check_cache_valid(cache, mutable) &&
- spin_trylock_bh(&tnl_vport->cache_lock)) {
- assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
- spin_unlock_bh(&tnl_vport->cache_lock);
- }
-}
-
-static void cache_cleaner(struct work_struct *work)
-{
- int i;
-
- schedule_cache_cleaner();
-
- rcu_read_lock();
- for (i = 0; i < PORT_TABLE_SIZE; i++) {
- struct hlist_node *n;
- struct hlist_head *bucket;
- struct tnl_vport *tnl_vport;
-
- bucket = &port_table[i];
- hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
- __cache_cleaner(tnl_vport);
- }
- rcu_read_unlock();
-}
-
-static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
-{
- void *cache_data = get_cached_header(cache);
- int hh_off;
-
-#ifdef HAVE_HH_SEQ
- unsigned hh_seq;
-
- do {
- hh_seq = read_seqbegin(&hh->hh_lock);
- hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
- memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
- cache->hh_len = hh->hh_len;
- } while (read_seqretry(&hh->hh_lock, hh_seq));
-
- cache->hh_seq = hh_seq;
-#else
- read_lock(&hh->hh_lock);
- hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
- memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
- cache->hh_len = hh->hh_len;
- read_unlock(&hh->hh_lock);
-#endif
-}
-
-static struct tnl_cache *build_cache(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- struct rtable *rt)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- static const struct ovs_key_ipv4_tunnel tun_key;
- struct tnl_cache *cache;
- void *cache_data;
- int cache_len;
- struct hh_cache *hh;
- int tunnel_hlen;
-
- if (!(mutable->flags & TNL_F_HDR_CACHE))
- return NULL;
-
- tunnel_hlen = tnl_vport->tnl_ops->hdr_len(mutable, &tun_key);
- if (tunnel_hlen < 0)
- return NULL;
-
- tunnel_hlen += sizeof(struct iphdr);
-
- /*
- * If there is no entry in the ARP cache or if this device does not
- * support hard header caching, just fall back to the IP stack.
- */
-
- hh = rt_hh(rt);
- if (!hh)
- return NULL;
-
- /*
- * If the lock is contended, fall back to building the header directly.
- * We're not going to help performance by sitting here spinning.
- */
- if (!spin_trylock(&tnl_vport->cache_lock))
- return NULL;
-
- cache = cache_dereference(tnl_vport);
- if (check_cache_valid(cache, mutable))
- goto unlock;
- else
- cache = NULL;
-
- cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + tunnel_hlen;
-
- cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
- cache_len, GFP_ATOMIC);
- if (!cache)
- goto unlock;
-
- create_eth_hdr(cache, hh);
- cache_data = get_cached_header(cache) + cache->hh_len;
- cache->len = cache->hh_len + tunnel_hlen;
-
- create_tunnel_header(vport, mutable, &tun_key, rt, cache_data);
-
- cache->mutable_seq = mutable->seq;
- cache->rt = rt;
-#ifdef NEED_CACHE_TIMEOUT
- cache->expiration = jiffies + tnl_vport->cache_exp_interval;
-#endif
-
- if (ovs_is_internal_dev(rt_dst(rt).dev)) {
- struct sw_flow_key flow_key;
- struct vport *dst_vport;
- struct sk_buff *skb;
- int err;
- int flow_key_len;
- struct sw_flow *flow;
-
- dst_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
- if (!dst_vport)
- goto done;
-
- skb = alloc_skb(cache->len, GFP_ATOMIC);
- if (!skb)
- goto done;
-
- __skb_put(skb, cache->len);
- memcpy(skb->data, get_cached_header(cache), cache->len);
-
- err = ovs_flow_extract(skb, dst_vport->port_no, &flow_key,
- &flow_key_len);
-
- consume_skb(skb);
- if (err)
- goto done;
-
- flow = ovs_flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
- &flow_key, flow_key_len);
- if (flow) {
- cache->flow = flow;
- ovs_flow_hold(flow);
- }
- }
-
-done:
- assign_cache_rcu(vport, cache);
-
-unlock:
- spin_unlock(&tnl_vport->cache_lock);
-
- return cache;
-}
-
-static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
+static struct rtable *find_route(const struct tnl_mutable_config *mutable,
__be32 saddr, __be32 daddr, u8 ipproto,
u8 tos)
{
#endif
}
-static struct rtable *find_route(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- __be32 saddr, __be32 daddr, u8 tos,
- struct tnl_cache **cache)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
-
- *cache = NULL;
- tos = RT_TOS(tos);
-
- if (tos == RT_TOS(mutable->tos) &&
- check_cache_valid(cur_cache, mutable)) {
- *cache = cur_cache;
- return cur_cache->rt;
- } else {
- struct rtable *rt;
-
- rt = __find_route(mutable, saddr, daddr,
- tnl_vport->tnl_ops->ipproto, tos);
- if (IS_ERR(rt))
- return NULL;
- if (likely(tos == RT_TOS(mutable->tos)))
- *cache = build_cache(vport, mutable, rt);
-
- return rt;
- }
-}
-
static bool need_linearize(const struct sk_buff *skb)
{
int i;
const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
enum vport_err_type err = VPORT_E_TX_ERROR;
struct rtable *rt;
- struct dst_entry *unattached_dst = NULL;
- struct tnl_cache *cache;
struct ovs_key_ipv4_tunnel tun_key;
int sent_len = 0;
int tunnel_hlen;
}
/* Route lookup */
- rt = find_route(vport, mutable, saddr, daddr, tos, &cache);
- if (unlikely(!rt))
+ rt = find_route(mutable, saddr, daddr, tnl_vport->tnl_ops->ipproto, tos);
+ if (IS_ERR(rt))
goto error_free;
- if (unlikely(!cache))
- unattached_dst = &rt_dst(rt);
/* Reset SKB */
nf_reset(skb);
/* Offloading */
skb = handle_offloads(skb, mutable, rt, tunnel_hlen);
- if (IS_ERR(skb))
- goto error;
+ if (IS_ERR(skb)) {
+ skb = NULL;
+ goto err_free_rt;
+ }
/* MTU */
if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off, tunnel_hlen))) {
err = VPORT_E_TX_DROPPED;
- goto error_free;
- }
-
- /*
- * If we are over the MTU, allow the IP stack to handle fragmentation.
- * Fragmentation is a slow path anyways.
- */
- if (unlikely(skb->len + tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
- cache)) {
- unattached_dst = &rt_dst(rt);
- dst_hold(unattached_dst);
- cache = NULL;
+ goto err_free_rt;
}
/* TTL Fixup. */
if (unlikely(vlan_deaccel_tag(skb)))
goto next;
- if (likely(cache)) {
- skb_push(skb, cache->len);
- memcpy(skb->data, get_cached_header(cache), cache->len);
- skb_reset_mac_header(skb);
- skb_set_network_header(skb, cache->hh_len);
-
- } else {
- skb_push(skb, tunnel_hlen);
- create_tunnel_header(vport, mutable, OVS_CB(skb)->tun_key, rt, skb->data);
- skb_reset_network_header(skb);
-
- if (next_skb)
- skb_dst_set(skb, dst_clone(unattached_dst));
- else {
- skb_dst_set(skb, unattached_dst);
- unattached_dst = NULL;
- }
- }
- skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
+ skb_push(skb, tunnel_hlen);
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ if (next_skb)
+ skb_dst_set(skb, dst_clone(&rt_dst(rt)));
+ else
+ skb_dst_set(skb, &rt_dst(rt));
+
+ /* Push IP header. */
iph = ip_hdr(skb);
- iph->tos = tos;
- iph->ttl = ttl;
- iph->frag_off = frag_off;
+ iph->version = 4;
+ iph->ihl = sizeof(struct iphdr) >> 2;
+ iph->protocol = tnl_vport->tnl_ops->ipproto;
+ iph->daddr = rt->rt_dst;
+ iph->saddr = rt->rt_src;
+ iph->tos = tos;
+ iph->ttl = ttl;
+ iph->frag_off = frag_off;
ip_select_ident(iph, &rt_dst(rt), NULL);
- skb = tnl_vport->tnl_ops->update_header(vport, mutable,
+ /* Push Tunnel header. */
+ skb = tnl_vport->tnl_ops->build_header(vport, mutable,
&rt_dst(rt), skb, tunnel_hlen);
if (unlikely(!skb))
goto next;
- if (likely(cache)) {
- int orig_len = skb->len - cache->len;
- struct vport *cache_vport;
-
- cache_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
- skb->protocol = htons(ETH_P_IP);
- iph = ip_hdr(skb);
- iph->tot_len = htons(skb->len - skb_network_offset(skb));
- ip_send_check(iph);
-
- if (cache_vport) {
- if (unlikely(compute_ip_summed(skb, true))) {
- kfree_skb(skb);
- goto next;
- }
-
- OVS_CB(skb)->flow = cache->flow;
- ovs_vport_receive(cache_vport, skb);
- sent_len += orig_len;
- } else {
- int xmit_err;
-
- skb->dev = rt_dst(rt).dev;
- xmit_err = dev_queue_xmit(skb);
-
- if (likely(net_xmit_eval(xmit_err) == 0))
- sent_len += orig_len;
- }
- } else
- sent_len += send_frags(skb, tunnel_hlen);
+ sent_len += send_frags(skb, tunnel_hlen);
next:
skb = next_skb;
if (unlikely(sent_len == 0))
ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
- goto out;
+ return sent_len;
+err_free_rt:
+ ip_rt_put(rt);
error_free:
ovs_tnl_free_linked_skbs(skb);
-error:
ovs_vport_record_error(vport, err);
-out:
- dst_release(unattached_dst);
return sent_len;
}
struct net_device *dev;
struct rtable *rt;
- rt = __find_route(mutable, mutable->key.saddr, mutable->key.daddr,
+ rt = find_route(mutable, mutable->key.saddr, mutable->key.daddr,
tnl_ops->ipproto, mutable->tos);
if (IS_ERR(rt))
return -EADDRNOTAVAIL;
if (err)
goto error_free_mutable;
- spin_lock_init(&tnl_vport->cache_lock);
-
-#ifdef NEED_CACHE_TIMEOUT
- tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
- (net_random() % (MAX_CACHE_EXP / 2));
-#endif
-
rcu_assign_pointer(tnl_vport->mutable, mutable);
port_table_add_port(vport);
struct tnl_vport *tnl_vport = container_of(rcu,
struct tnl_vport, rcu);
- free_cache((struct tnl_cache __force *)tnl_vport->cache);
kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
ovs_vport_free(tnl_vport_to_vport(tnl_vport));
}
/* All public tunnel flags. */
#define TNL_F_PUBLIC (TNL_F_CSUM | TNL_F_TOS_INHERIT | TNL_F_TTL_INHERIT | \
TNL_F_DF_INHERIT | TNL_F_DF_DEFAULT | TNL_F_PMTUD | \
- TNL_F_HDR_CACHE | TNL_F_IPSEC)
+ TNL_F_IPSEC)
/**
* struct port_lookup_key - Tunnel port key, used as hash table key.
*/
int (*hdr_len)(const struct tnl_mutable_config *,
const struct ovs_key_ipv4_tunnel *);
-
/*
- * Builds the static portion of the tunnel header, which is stored in
- * the header cache. In general the performance of this function is
- * not critical, since we try to call it only when building the cache,
- * so it is preferable to shift as much work as possible here. However,
- * in some circumstances caching is disabled and this function is
- * called for every packet, so try not to make it too slow.
+ * Returns a linked list of SKBs with tunnel headers (multiple
+ * packets may be generated in the event of fragmentation). Space
+ * equal to sizeof(struct iphdr) plus the value returned by hdr_len()
+ * will already have been allocated at the start of the packet, and
+ * the IP header will already have been constructed.
*/
- void (*build_header)(const struct vport *,
- const struct tnl_mutable_config *,
- const struct ovs_key_ipv4_tunnel *, void *header);
-
- /*
- * Updates the cached header of a packet to match the actual packet
- * data. Typical things that might need to be updated are length,
- * checksum, etc. The IP header will have already been updated and this
- * is the final step before transmission. Returns a linked list of
- * completed SKBs (multiple packets may be generated in the event
- * of fragmentation).
- */
- struct sk_buff *(*update_header)(const struct vport *,
+ struct sk_buff *(*build_header)(const struct vport *,
const struct tnl_mutable_config *,
struct dst_entry *, struct sk_buff *,
int tunnel_hlen);
};
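/*
 * Illustrative sketch of a build_header() op under the contract described
 * above (hypothetical protocol, not part of this change): the caller has
 * already reserved sizeof(struct iphdr) + hdr_len() bytes and written the
 * IP header, so the op only fills in its own header immediately after the
 * IP header and returns the SKB (or a list of SKBs if it fragments).
 */
static struct sk_buff *example_build_header(const struct vport *vport,
					    const struct tnl_mutable_config *mutable,
					    struct dst_entry *dst,
					    struct sk_buff *skb,
					    int tunnel_hlen)
{
	__be32 *hdr = (__be32 *)(ip_hdr(skb) + 1);	/* Just past the IP header. */

	*hdr = htonl(0);	/* Hypothetical 4-byte protocol header. */

	return skb;		/* No fragmentation, so a single-SKB list. */
}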
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
-/*
- * On these kernels we have a fast mechanism to tell if the ARP cache for a
- * particular destination has changed.
- */
-#define HAVE_HH_SEQ
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
-/*
- * On these kernels we have a fast mechanism to tell if the routing table
- * has changed.
- */
-#define HAVE_RT_GENID
-#endif
-#if !defined(HAVE_HH_SEQ) || !defined(HAVE_RT_GENID)
-/* If we can't detect all system changes directly we need to use a timeout. */
-#define NEED_CACHE_TIMEOUT
-#endif
-struct tnl_cache {
- struct rcu_head rcu;
-
- int len; /* Length of data to be memcpy'd from cache. */
- int hh_len; /* Hardware hdr length, cached from hh_cache. */
-
- /* Sequence number of mutable->seq from which this cache was
- * generated. */
- unsigned mutable_seq;
-
-#ifdef HAVE_HH_SEQ
- /*
- * The sequence number from the seqlock protecting the hardware header
- * cache (in the ARP cache). Since every write increments the counter
- * this gives us an easy way to tell if it has changed.
- */
- unsigned hh_seq;
-#endif
-
-#ifdef NEED_CACHE_TIMEOUT
- /*
- * If we don't have direct mechanisms to detect all important changes in
- * the system, fall back to an expiration time. This expiration time
- * can be relatively short since at high rates there will be millions of
- * packets per second, so we'll still get plenty of benefit from the
- * cache. Note that if something changes we may blackhole packets
- * until the expiration time (depending on what changed and the kernel
- * version we may be able to detect the change sooner). Expiration is
- * expressed as a time in jiffies.
- */
- unsigned long expiration;
-#endif
-
- /*
- * The routing table entry that is the result of looking up the tunnel
- * endpoints. It also contains a sequence number (called a generation
- * ID) that can be compared to a global sequence to tell if the routing
- * table has changed (and therefore there is a potential that this
- * cached route has been invalidated).
- */
- struct rtable *rt;
-
- /*
- * If the output device for tunnel traffic is an OVS internal device,
- * this is the matching flow in that datapath. Since all tunnel traffic
- * will have the same headers, this allows us to cache the flow lookup.
- * NULL if the output device is not OVS or if there is no flow installed.
- */
- struct sw_flow *flow;
-
- /* The cached header follows after padding for alignment. */
-};
-
struct tnl_vport {
struct rcu_head rcu;
struct hlist_node hash_node;
* this is not needed.
*/
atomic_t frag_id;
-
- spinlock_t cache_lock;
- struct tnl_cache __rcu *cache; /* Protected by RCU/cache_lock. */
-
-#ifdef NEED_CACHE_TIMEOUT
- /*
- * If we must rely on expiration time to invalidate the cache, this is
- * the interval. It is randomized within a range (defined by
- * MAX_CACHE_EXP in tunnel.c) to avoid synchronized expirations caused
- * by creation of a large number of tunnels at a one time.
- */
- unsigned long cache_exp_interval;
-#endif
};
struct vport *ovs_tnl_create(const struct vport_parms *, const struct vport_ops *,