/*
- * Copyright (c) 2010 Nicira Networks.
- * Distributed under the terms of the GNU GPL version 2.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
- * Significant portions of this file may be copied from parts of the Linux
- * kernel, by Linus Torvalds and others.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
+#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in_route.h>
+#include <linux/inetdevice.h>
#include <linux/jhash.h>
+#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
+#include <linux/rculist.h>
#include <net/dsfield.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/xfrm.h>
-#include "actions.h"
#include "checksum.h"
#include "datapath.h"
-#include "table.h"
#include "tunnel.h"
+#include "vlan.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"
-#ifdef NEED_CACHE_TIMEOUT
-/*
- * On kernels where we can't quickly detect changes in the rest of the system
- * we use an expiration time to invalidate the cache. A shorter expiration
- * reduces the length of time that we may potentially blackhole packets while
- * a longer time increases performance by reducing the frequency that the
- * cache needs to be rebuilt. A variety of factors may cause the cache to be
- * invalidated before the expiration time but this is the maximum. The time
- * is expressed in jiffies.
- */
-#define MAX_CACHE_EXP HZ
-#endif
-
-/*
- * Interval to check for and remove caches that are no longer valid. Caches
- * are checked for validity before they are used for packet encapsulation and
- * old caches are removed at that time. However, if no packets are sent through
- * the tunnel then the cache will never be destroyed. Since it holds
- * references to a number of system objects, the cache will continue to use
- * system resources by not allowing those objects to be destroyed. The cache
- * cleaner is periodically run to free invalid caches. It does not
- * significantly affect system performance. A lower interval will release
- * resources faster but will itself consume resources by requiring more frequent
- * checks. A longer interval may result in messages being printed to the kernel
- * message buffer about unreleased resources. The interval is expressed in
- * jiffies.
- */
-#define CACHE_CLEANER_INTERVAL (5 * HZ)
-
-#define CACHE_DATA_ALIGN 16
-
-/* Protected by RCU. */
-static struct tbl *port_table __read_mostly;
+#define PORT_TABLE_SIZE 1024
-static void cache_cleaner(struct work_struct *work);
-static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
+static struct hlist_head *port_table __read_mostly;
/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
+static unsigned int key_multicast_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
+static unsigned int null_ports __read_mostly;
+static unsigned int multicast_ports __read_mostly;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif
-static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
+static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
return vport_from_priv(tnl_vport);
}
-static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
-{
- return container_of(node, struct tnl_vport, tbl_node);
-}
-
-static inline void schedule_cache_cleaner(void)
-{
- schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
-}
-
-static void free_cache(struct tnl_cache *cache)
-{
- if (!cache)
- return;
-
- flow_put(cache->flow);
- ip_rt_put(cache->rt);
- kfree(cache);
-}
-
static void free_config_rcu(struct rcu_head *rcu)
{
struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
kfree(c);
}
-static void free_cache_rcu(struct rcu_head *rcu)
+/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
+ * within an RCU callback. Fortunately this part doesn't require waiting for
+ * an RCU grace period.
+ */
+static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
{
- struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
- free_cache(c);
+ ASSERT_RTNL();
+ if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
+ struct in_device *in_dev;
+ in_dev = inetdev_by_index(port_key_get_net(&mutable->key), mutable->mlink);
+ if (in_dev)
+ ip_mc_dec_group(in_dev, mutable->key.daddr);
+ }
}
static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *old_config;
- old_config = tnl_vport->mutable;
+ old_config = rtnl_dereference(tnl_vport->mutable);
rcu_assign_pointer(tnl_vport->mutable, new_config);
- call_rcu(&old_config->rcu, free_config_rcu);
-}
-
-static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_cache *old_cache;
-
- old_cache = tnl_vport->cache;
- rcu_assign_pointer(tnl_vport->cache, new_cache);
- if (old_cache)
- call_rcu(&old_cache->rcu, free_cache_rcu);
+ free_mutable_rtnl(old_config);
+ call_rcu(&old_config->rcu, free_config_rcu);
}
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
- if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
- if (mutable->port_config.saddr)
+ bool is_multicast = ipv4_is_multicast(mutable->key.daddr);
+
+ if (mutable->flags & TNL_F_IN_KEY_MATCH) {
+ if (mutable->key.saddr)
return &local_remote_ports;
+ else if (is_multicast)
+ return &multicast_ports;
else
return &remote_ports;
} else {
- if (mutable->port_config.saddr)
+ if (mutable->key.saddr)
return &key_local_remote_ports;
- else
+ else if (is_multicast)
+ return &key_multicast_ports;
+ else if (mutable->key.daddr)
return &key_remote_ports;
+ else
+ return &null_ports;
}
}
-struct port_lookup_key {
- u32 tunnel_type;
- __be32 saddr;
- __be32 daddr;
- __be32 key;
- const struct tnl_mutable_config *mutable;
-};
-
-/*
- * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
- * the comparision.
- */
-static int port_cmp(const struct tbl_node *node, void *target)
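+/* Hashes the lookup key as an array of u32s, so PORT_KEY_LEN must be a
+ * multiple of sizeof(u32) and every byte of the key, padding included,
+ * must be initialized. */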
+static u32 port_hash(const struct port_lookup_key *key)
{
- const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
- struct port_lookup_key *lookup = target;
-
- lookup->mutable = rcu_dereference(tnl_vport->mutable);
-
- return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
- lookup->mutable->port_config.daddr == lookup->daddr &&
- lookup->mutable->port_config.in_key == lookup->key &&
- lookup->mutable->port_config.saddr == lookup->saddr);
+ return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}
-static u32 port_hash(struct port_lookup_key *k)
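+/* PORT_TABLE_SIZE must be a power of two for this mask to pick a valid
+ * bucket. */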
+static struct hlist_head *find_bucket(u32 hash)
{
- return jhash_3words(k->key, k->saddr, k->daddr, k->tunnel_type);
+ return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}
-static u32 mutable_hash(const struct tnl_mutable_config *mutable)
+static void port_table_add_port(struct vport *vport)
{
- struct port_lookup_key lookup;
-
- lookup.saddr = mutable->port_config.saddr;
- lookup.daddr = mutable->port_config.daddr;
- lookup.key = mutable->port_config.in_key;
- lookup.tunnel_type = mutable->tunnel_type;
-
- return port_hash(&lookup);
-}
+ struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
+ const struct tnl_mutable_config *mutable;
+ u32 hash;
-static void check_table_empty(void)
-{
- if (tbl_count(port_table) == 0) {
- struct tbl *old_table = port_table;
+ mutable = rtnl_dereference(tnl_vport->mutable);
+ hash = port_hash(&mutable->key);
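+	/* Publishes the port to RCU readers.  Writers are serialized by
+	 * RTNL, which is what makes rtnl_dereference() of 'mutable' safe. */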
+ hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
- cancel_delayed_work_sync(&cache_cleaner_wq);
- rcu_assign_pointer(port_table, NULL);
- tbl_deferred_destroy(old_table, NULL);
- }
+ (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}
-static int add_port(struct vport *vport)
+static void port_table_move_port(struct vport *vport,
+ struct tnl_mutable_config *new_mutable)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- int err;
-
- if (!port_table) {
- struct tbl *new_table;
-
- new_table = tbl_create(0);
- if (!new_table)
- return -ENOMEM;
-
- rcu_assign_pointer(port_table, new_table);
- schedule_cache_cleaner();
-
- } else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
- struct tbl *old_table = port_table;
- struct tbl *new_table;
-
- new_table = tbl_expand(old_table);
- if (IS_ERR(new_table))
- return PTR_ERR(new_table);
-
- rcu_assign_pointer(port_table, new_table);
- tbl_deferred_destroy(old_table, NULL);
- }
-
- err = tbl_insert(port_table, &tnl_vport->tbl_node, mutable_hash(tnl_vport->mutable));
- if (err) {
- check_table_empty();
- return err;
- }
+ u32 hash;
- (*find_port_pool(tnl_vport->mutable))++;
+ hash = port_hash(&new_mutable->key);
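+	/* The delete/add below is not atomic: a concurrent lookup may
+	 * briefly miss this port, but any port it does find is consistent. */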
+ hlist_del_init_rcu(&tnl_vport->hash_node);
+ hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
- return 0;
+ (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
+ assign_config_rcu(vport, new_mutable);
+ (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}
-static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
+static void port_table_remove_port(struct vport *vport)
{
- int err;
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- u32 hash;
- hash = mutable_hash(new_mutable);
- if (hash == tnl_vport->tbl_node.hash)
- goto table_updated;
+ hlist_del_init_rcu(&tnl_vport->hash_node);
- /*
- * Ideally we should make this move atomic to avoid having gaps in
- * finding tunnels or the possibility of failure. However, if we do
- * find a tunnel it will always be consistent.
- */
- err = tbl_remove(port_table, &tnl_vport->tbl_node);
- if (err)
- return err;
-
- err = tbl_insert(port_table, &tnl_vport->tbl_node, hash);
- if (err) {
- (*find_port_pool(tnl_vport->mutable))--;
- check_table_empty();
- return err;
- }
-
-table_updated:
- (*find_port_pool(tnl_vport->mutable))--;
- assign_config_rcu(vport, new_mutable);
- (*find_port_pool(tnl_vport->mutable))++;
-
- return 0;
+ (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}
-static int del_port(struct vport *vport)
+static struct vport *port_table_lookup(struct port_lookup_key *key,
+ const struct tnl_mutable_config **pmutable)
{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- int err;
+ struct hlist_node *n;
+ struct hlist_head *bucket;
+ u32 hash = port_hash(key);
+ struct tnl_vport *tnl_vport;
- err = tbl_remove(port_table, &tnl_vport->tbl_node);
- if (err)
- return err;
+ bucket = find_bucket(hash);
- check_table_empty();
- (*find_port_pool(tnl_vport->mutable))--;
+ hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
+ struct tnl_mutable_config *mutable;
- return 0;
+ mutable = rcu_dereference_rtnl(tnl_vport->mutable);
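+		/* Keys are compared as raw bytes, so both copies must be
+		 * fully initialized, padding included. */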
+ if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
+ *pmutable = mutable;
+ return tnl_vport_to_vport(tnl_vport);
+ }
+ }
+
+ return NULL;
}
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
- int tunnel_type,
- const struct tnl_mutable_config **mutable)
+struct vport *ovs_tnl_find_port(struct net *net, __be32 saddr, __be32 daddr,
+ __be64 key, int tunnel_type,
+ const struct tnl_mutable_config **mutable)
{
struct port_lookup_key lookup;
- struct tbl *table = rcu_dereference(port_table);
- struct tbl_node *tbl_node;
-
- if (unlikely(!table))
- return NULL;
+ struct vport *vport;
+ bool is_multicast = ipv4_is_multicast(saddr);
+ port_key_set_net(&lookup, net);
lookup.saddr = saddr;
lookup.daddr = daddr;
- if (tunnel_type & TNL_T_KEY_EXACT) {
- lookup.key = key;
- lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;
-
- if (key_local_remote_ports) {
- tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
- if (tbl_node)
- goto found;
- }
-
- if (key_remote_ports) {
- lookup.saddr = 0;
-
- tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
- if (tbl_node)
- goto found;
+ /* First try for exact match on in_key. */
+ lookup.in_key = key;
+ lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
+ if (!is_multicast && key_local_remote_ports) {
+ vport = port_table_lookup(&lookup, mutable);
+ if (vport)
+ return vport;
+ }
+ if (key_remote_ports) {
+ lookup.saddr = 0;
+ vport = port_table_lookup(&lookup, mutable);
+ if (vport)
+ return vport;
- lookup.saddr = saddr;
- }
+ lookup.saddr = saddr;
}
- if (tunnel_type & TNL_T_KEY_MATCH) {
- lookup.key = 0;
- lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;
+ /* Then try matches that wildcard in_key. */
+ lookup.in_key = 0;
+ lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
+ if (!is_multicast && local_remote_ports) {
+ vport = port_table_lookup(&lookup, mutable);
+ if (vport)
+ return vport;
+ }
+ if (remote_ports) {
+ lookup.saddr = 0;
+ vport = port_table_lookup(&lookup, mutable);
+ if (vport)
+ return vport;
+ }
- if (local_remote_ports) {
- tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
- if (tbl_node)
- goto found;
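+	/* Next try multicast tunnels.  Their configured daddr is the group
+	 * address, which a received packet carries in the address passed in
+	 * here as 'saddr', so match on that alone. */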
+ if (is_multicast) {
+ lookup.saddr = 0;
+ lookup.daddr = saddr;
+ if (key_multicast_ports) {
+ lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
+ lookup.in_key = key;
+ vport = port_table_lookup(&lookup, mutable);
+ if (vport)
+ return vport;
}
-
- if (remote_ports) {
- lookup.saddr = 0;
-
- tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
- if (tbl_node)
- goto found;
+ if (multicast_ports) {
+ lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
+ lookup.in_key = 0;
+ vport = port_table_lookup(&lookup, mutable);
+ if (vport)
+ return vport;
}
}
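+	/* Finally fall back to ports with no addresses configured; they
+	 * accept any remote and get their tunnel info from the flow. */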
+ if (null_ports) {
+ lookup.daddr = 0;
+ lookup.saddr = 0;
+ lookup.tunnel_type = tunnel_type;
+ vport = port_table_lookup(&lookup, mutable);
+ if (vport)
+ return vport;
+ }
return NULL;
-
-found:
- *mutable = lookup.mutable;
- return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
}
-static inline void ecn_decapsulate(struct sk_buff *skb)
+static void ecn_decapsulate(struct sk_buff *skb)
{
- /* This is accessing the outer IP header of the tunnel, which we've
- * already validated to be OK. skb->data is currently set to the start
- * of the inner Ethernet header, and we've validated ETH_HLEN.
- */
- if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
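+	/* If the outer header arrived with CE set, propagate the congestion
+	 * mark to the inner header so that ECN survives decapsulation. */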
+ if (unlikely(INET_ECN_is_ce(OVS_CB(skb)->tun_key->ipv4_tos))) {
__be16 protocol = skb->protocol;
skb_set_network_header(skb, ETH_HLEN);
- if (skb->protocol == htons(ETH_P_8021Q)) {
+ if (protocol == htons(ETH_P_8021Q)) {
if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
return;
}
}
-/* Called with rcu_read_lock. */
-void tnl_rcv(struct vport *vport, struct sk_buff *skb)
+/**
+ * ovs_tnl_rcv - ingress point for generic tunnel code
+ *
+ * @vport: port this packet was received on
+ * @skb: received packet
+ * @tos: ToS from encapsulating IP packet, used to copy ECN bits
+ *
+ * Must be called with rcu_read_lock.
+ *
+ * Packets received by this function are in the following state:
+ * - skb->data points to the inner Ethernet header.
+ * - The inner Ethernet header is in the linear data area.
+ * - skb->csum does not include the inner Ethernet header.
+ * - The layer pointers are undefined.
+ */
+void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
- /* Packets received by this function are in the following state:
- * - skb->data points to the inner Ethernet header.
- * - The inner Ethernet header is in the linear data area.
- * - skb->csum does not include the inner Ethernet header.
- * - The layer pointers point at the outer headers.
- */
+ struct ethhdr *eh;
- struct ethhdr *eh = (struct ethhdr *)skb->data;
+ skb_reset_mac_header(skb);
+ eh = eth_hdr(skb);
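+	/* h_proto values of 1536 and up are Ethertypes; smaller values are
+	 * 802.3 length fields. */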
if (likely(ntohs(eh->h_proto) >= 1536))
skb->protocol = eh->h_proto;
skb_dst_drop(skb);
nf_reset(skb);
+ skb_clear_rxhash(skb);
secpath_reset(skb);
ecn_decapsulate(skb);
- compute_ip_summed(skb, false);
+ vlan_set_tci(skb, 0);
+
+ if (unlikely(compute_ip_summed(skb, false))) {
+ kfree_skb(skb);
+ return;
+ }
- vport_receive(vport, skb);
+ ovs_vport_receive(vport, skb);
}
static bool check_ipv4_address(__be32 addr)
int addr_type;
int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+ __be16 frag_off;
/* Check source address is valid. */
addr_type = ipv6_addr_type(&old_ipv6h->saddr);
return false;
/* Don't respond to ICMP error messages. */
- payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
+ payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr, &frag_off);
if (payload_off < 0)
return false;
+ payload_length);
ipv6h->nexthdr = NEXTHDR_ICMP;
ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
- ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
- ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);
+ ipv6h->daddr = old_ipv6h->saddr;
+ ipv6h->saddr = old_ipv6h->daddr;
/* ICMPv6 */
icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
}
#endif /* IPv6 */
-bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
+bool ovs_tnl_frag_needed(struct vport *vport,
+ const struct tnl_mutable_config *mutable,
+ struct sk_buff *skb, unsigned int mtu)
{
unsigned int eth_hdr_len = ETH_HLEN;
unsigned int total_length = 0, header_length = 0, payload_length;
}
#endif
- total_length = min(total_length, mutable->mtu);
payload_length = total_length - header_length;
nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
vh->h_vlan_encapsulated_proto = skb->protocol;
- }
+ } else
+ vlan_set_tci(nskb, vlan_get_tci(skb));
skb_reset_mac_header(nskb);
/* Protocol */
ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif
- /*
- * Assume that flow based keys are symmetric with respect to input
- * and output and use the key that we were going to put on the
- * outgoing packet for the fake received packet. If the keys are
- * not symmetric then PMTUD needs to be disabled since we won't have
- * any way of synthesizing packets.
- */
- if ((mutable->port_config.flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
- (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
- OVS_CB(nskb)->tun_id = flow_key;
+ if (unlikely(compute_ip_summed(nskb, false))) {
+ kfree_skb(nskb);
+ return false;
+ }
- compute_ip_summed(nskb, false);
- vport_receive(vport, nskb);
+ ovs_vport_receive(vport, nskb);
return true;
}
static bool check_mtu(struct sk_buff *skb,
struct vport *vport,
const struct tnl_mutable_config *mutable,
- const struct rtable *rt, __be16 *frag_offp)
+ const struct rtable *rt, __be16 *frag_offp,
+ int tunnel_hlen)
{
- int mtu;
+ bool df_inherit;
+ bool pmtud;
__be16 frag_off;
+ int mtu = 0;
+ unsigned int packet_length = skb->len - ETH_HLEN;
+
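+	/* A flow-provided tun_key dictates the DF policy through its flags;
+	 * otherwise the per-port config decides, and may also request DF
+	 * inheritance and path MTU discovery. */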
+ if (OVS_CB(skb)->tun_key->ipv4_dst) {
+ df_inherit = false;
+ pmtud = false;
+ frag_off = OVS_CB(skb)->tun_key->tun_flags & OVS_TNL_F_DONT_FRAGMENT ?
+ htons(IP_DF) : 0;
+ } else {
+ df_inherit = mutable->flags & TNL_F_DF_INHERIT;
+ pmtud = mutable->flags & TNL_F_PMTUD;
+ frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
+ }
+
+ /* Allow for one level of tagging in the packet length. */
+ if (!vlan_tx_tag_present(skb) &&
+ eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
+ packet_length -= VLAN_HLEN;
+
+ if (pmtud) {
+ int vlan_header = 0;
+
+ /* The tag needs to go in packet regardless of where it
+ * currently is, so subtract it from the MTU.
+ */
+ if (vlan_tx_tag_present(skb) ||
+ eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
+ vlan_header = VLAN_HLEN;
- frag_off = (mutable->port_config.flags & TNL_F_PMTUD) ? htons(IP_DF) : 0;
- if (frag_off)
mtu = dst_mtu(&rt_dst(rt))
- ETH_HLEN
- - mutable->tunnel_hlen
- - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
- else
- mtu = mutable->mtu;
+ - tunnel_hlen
+ - vlan_header;
+ }
if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *old_iph = ip_hdr(skb);
+ struct iphdr *iph = ip_hdr(skb);
+
+ if (df_inherit)
+ frag_off = iph->frag_off & htons(IP_DF);
- frag_off |= old_iph->frag_off & htons(IP_DF);
- mtu = max(mtu, IP_MIN_MTU);
+ if (pmtud && iph->frag_off & htons(IP_DF)) {
+ mtu = max(mtu, IP_MIN_MTU);
- if ((old_iph->frag_off & htons(IP_DF)) &&
- mtu < ntohs(old_iph->tot_len)) {
- if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
- goto drop;
+ if (packet_length > mtu &&
+ ovs_tnl_frag_needed(vport, mutable, skb, mtu))
+ return false;
}
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6)) {
- unsigned int packet_length = skb->len - ETH_HLEN
- - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
-
- mtu = max(mtu, IPV6_MIN_MTU);
-
- /* IPv6 requires PMTUD if the packet is above the minimum MTU. */
- if (packet_length > IPV6_MIN_MTU)
+ /* IPv6 requires end hosts to do fragmentation
+ * if the packet is above the minimum MTU.
+ */
+ if (df_inherit && packet_length > IPV6_MIN_MTU)
frag_off = htons(IP_DF);
- if (mtu < packet_length) {
- if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
- goto drop;
+ if (pmtud) {
+ mtu = max(mtu, IPV6_MIN_MTU);
+
+ if (packet_length > mtu &&
+ ovs_tnl_frag_needed(vport, mutable, skb, mtu))
+ return false;
}
}
#endif
*frag_offp = frag_off;
return true;
-
-drop:
- *frag_offp = 0;
- return false;
-}
-
-static void create_tunnel_header(const struct vport *vport,
- const struct tnl_mutable_config *mutable,
- const struct rtable *rt, void *header)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct iphdr *iph = header;
-
- iph->version = 4;
- iph->ihl = sizeof(struct iphdr) >> 2;
- iph->frag_off = htons(IP_DF);
- iph->protocol = tnl_vport->tnl_ops->ipproto;
- iph->tos = mutable->port_config.tos;
- iph->daddr = rt->rt_dst;
- iph->saddr = rt->rt_src;
- iph->ttl = mutable->port_config.ttl;
- if (!iph->ttl)
- iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);
-
- tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
-}
-
-static inline void *get_cached_header(const struct tnl_cache *cache)
-{
- return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
-}
-
-static inline bool check_cache_valid(const struct tnl_cache *cache,
- const struct tnl_mutable_config *mutable)
-{
- return cache &&
-#ifdef NEED_CACHE_TIMEOUT
- time_before(jiffies, cache->expiration) &&
-#endif
-#ifdef HAVE_RT_GENID
- atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
-#endif
-#ifdef HAVE_HH_SEQ
- rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
-#endif
- mutable->seq == cache->mutable_seq &&
- (!is_internal_dev(rt_dst(cache->rt).dev) ||
- (cache->flow && !cache->flow->dead));
-}
-
-static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
-{
- struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
- const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
- const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
-
- if (cache && !check_cache_valid(cache, mutable) &&
- spin_trylock_bh(&tnl_vport->cache_lock)) {
- assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
- spin_unlock_bh(&tnl_vport->cache_lock);
- }
-
- return 0;
-}
-
-static void cache_cleaner(struct work_struct *work)
-{
- schedule_cache_cleaner();
-
- rcu_read_lock();
- tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
- rcu_read_unlock();
}
-static inline void create_eth_hdr(struct tnl_cache *cache,
- const struct rtable *rt)
+static struct rtable *find_route(struct net *net,
+ __be32 *saddr, __be32 daddr, u8 ipproto,
+ u8 tos)
{
- void *cache_data = get_cached_header(cache);
- int hh_len = rt_dst(rt).hh->hh_len;
- int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;
-
-#ifdef HAVE_HH_SEQ
- unsigned hh_seq;
-
- do {
- hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
- memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
- } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));
-
- cache->hh_seq = hh_seq;
+ struct rtable *rt;
+	/* Tunnel configuration keeps the DSCP part of the ToS bits, but the
+	 * Linux routing code expects RT_TOS bits only. */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+ struct flowi fl = { .nl_u = { .ip4_u = {
+ .daddr = daddr,
+ .saddr = *saddr,
+ .tos = RT_TOS(tos) } },
+ .proto = ipproto };
+
+ if (unlikely(ip_route_output_key(net, &rt, &fl)))
+ return ERR_PTR(-EADDRNOTAVAIL);
+ *saddr = fl.nl_u.ip4_u.saddr;
+ return rt;
#else
- read_lock_bh(&rt_dst(rt).hh->hh_lock);
- memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
- read_unlock_bh(&rt_dst(rt).hh->hh_lock);
+ struct flowi4 fl = { .daddr = daddr,
+ .saddr = *saddr,
+ .flowi4_tos = RT_TOS(tos),
+ .flowi4_proto = ipproto };
+
+ rt = ip_route_output_key(net, &fl);
+ *saddr = fl.saddr;
+ return rt;
#endif
}
-static struct tnl_cache *build_cache(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- struct rtable *rt)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_cache *cache;
- void *cache_data;
- int cache_len;
-
- if (!(mutable->port_config.flags & TNL_F_HDR_CACHE))
- return NULL;
-
- /*
- * If there is no entry in the ARP cache or if this device does not
- * support hard header caching just fall back to the IP stack.
- */
- if (!rt_dst(rt).hh)
- return NULL;
-
- /*
- * If lock is contended fall back to directly building the header.
- * We're not going to help performance by sitting here spinning.
- */
- if (!spin_trylock_bh(&tnl_vport->cache_lock))
- return NULL;
-
- cache = tnl_vport->cache;
- if (check_cache_valid(cache, mutable))
- goto unlock;
- else
- cache = NULL;
-
- cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;
-
- cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
- cache_len, GFP_ATOMIC);
- if (!cache)
- goto unlock;
-
- cache->len = cache_len;
-
- create_eth_hdr(cache, rt);
- cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;
-
- create_tunnel_header(vport, mutable, rt, cache_data);
-
- cache->mutable_seq = mutable->seq;
- cache->rt = rt;
-#ifdef NEED_CACHE_TIMEOUT
- cache->expiration = jiffies + tnl_vport->cache_exp_interval;
-#endif
-
- if (is_internal_dev(rt_dst(rt).dev)) {
- struct odp_flow_key flow_key;
- struct tbl_node *flow_node;
- struct vport *vport;
- struct sk_buff *skb;
- bool is_frag;
- int err;
-
- vport = internal_dev_get_vport(rt_dst(rt).dev);
- if (!vport)
- goto done;
-
- skb = alloc_skb(cache->len, GFP_ATOMIC);
- if (!skb)
- goto done;
-
- __skb_put(skb, cache->len);
- memcpy(skb->data, get_cached_header(cache), cache->len);
-
- err = flow_extract(skb, vport->port_no, &flow_key, &is_frag);
-
- kfree_skb(skb);
- if (err || is_frag)
- goto done;
-
- flow_node = tbl_lookup(rcu_dereference(vport->dp->table),
- &flow_key, flow_hash(&flow_key),
- flow_cmp);
- if (flow_node) {
- struct sw_flow *flow = flow_cast(flow_node);
-
- cache->flow = flow;
- flow_hold(flow);
- }
- }
-
-done:
- assign_cache_rcu(vport, cache);
-
-unlock:
- spin_unlock_bh(&tnl_vport->cache_lock);
-
- return cache;
-}
-
-static struct rtable *find_route(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- u8 tos, struct tnl_cache **cache)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
-
- *cache = NULL;
- tos = RT_TOS(tos);
-
- if (likely(tos == mutable->port_config.tos &&
- check_cache_valid(cur_cache, mutable))) {
- *cache = cur_cache;
- return cur_cache->rt;
- } else {
- struct rtable *rt;
- struct flowi fl = { .nl_u = { .ip4_u =
- { .daddr = mutable->port_config.daddr,
- .saddr = mutable->port_config.saddr,
- .tos = tos } },
- .proto = tnl_vport->tnl_ops->ipproto };
-
- if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
- return NULL;
-
- if (likely(tos == mutable->port_config.tos))
- *cache = build_cache(vport, mutable, rt);
-
- return rt;
- }
-}
-
-static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
-{
- if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
- struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
- if (unlikely(!nskb)) {
- kfree_skb(skb);
- return ERR_PTR(-ENOMEM);
- }
-
- set_skb_csum_bits(skb, nskb);
-
- if (skb->sk)
- skb_set_owner_w(nskb, skb->sk);
-
- kfree_skb(skb);
- return nskb;
- }
-
- return skb;
-}
-
-static inline bool need_linearize(const struct sk_buff *skb)
+static bool need_linearize(const struct sk_buff *skb)
{
int i;
* change them from underneath us and we can skip the linearization.
*/
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- if (unlikely(page_count(skb_shinfo(skb)->frags[0].page) > 1))
+ if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
return true;
return false;
static struct sk_buff *handle_offloads(struct sk_buff *skb,
const struct tnl_mutable_config *mutable,
- const struct rtable *rt)
+ const struct rtable *rt,
+ int tunnel_hlen)
{
int min_headroom;
int err;
- forward_ip_summed(skb);
-
- err = vswitch_skb_checksum_setup(skb);
- if (unlikely(err))
- goto error_free;
-
min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
- + mutable->tunnel_hlen;
+ + tunnel_hlen
+ + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+
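+	/* Ensure enough writable headroom for all of the new headers up
+	 * front; GSO preserves headroom when it creates the segments. */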
+ if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+ int head_delta = SKB_DATA_ALIGN(min_headroom -
+ skb_headroom(skb) +
+ 16);
+ err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+ 0, GFP_ATOMIC);
+ if (unlikely(err))
+ goto error_free;
+ }
+
+ forward_ip_summed(skb, true);
if (skb_is_gso(skb)) {
struct sk_buff *nskb;
- /*
- * If we are doing GSO on a pskb it is better to make sure that
- * the headroom is correct now. We will only have to copy the
- * portion in the linear data area and GSO will preserve
- * headroom when it creates the segments. This is particularly
- * beneficial on Xen where we get a lot of GSO pskbs.
- * Conversely, we avoid copying if it is just to get our own
- * writable clone because GSO will do the copy for us.
- */
- if (skb_headroom(skb) < min_headroom) {
- skb = check_headroom(skb, min_headroom);
- if (unlikely(IS_ERR(skb))) {
- err = PTR_ERR(skb);
- goto error;
- }
- }
-
nskb = skb_gso_segment(skb, 0);
- kfree_skb(skb);
- if (unlikely(IS_ERR(nskb))) {
+ if (IS_ERR(nskb)) {
+ kfree_skb(skb);
err = PTR_ERR(nskb);
goto error;
}
+ consume_skb(skb);
skb = nskb;
- } else {
- skb = check_headroom(skb, min_headroom);
- if (unlikely(IS_ERR(skb))) {
- err = PTR_ERR(skb);
- goto error;
- }
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- /*
- * Pages aren't locked and could change at any time.
- * If this happens after we compute the checksum, the
- * checksum will be wrong. We linearize now to avoid
- * this problem.
- */
- if (unlikely(need_linearize(skb))) {
- err = __skb_linearize(skb);
- if (unlikely(err))
- goto error_free;
- }
-
- err = skb_checksum_help(skb);
+ } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
+ /* Pages aren't locked and could change at any time.
+ * If this happens after we compute the checksum, the
+ * checksum will be wrong. We linearize now to avoid
+ * this problem.
+ */
+ if (unlikely(need_linearize(skb))) {
+ err = __skb_linearize(skb);
if (unlikely(err))
goto error_free;
- } else if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ err = skb_checksum_help(skb);
+ if (unlikely(err))
+ goto error_free;
}
+ set_ip_summed(skb, OVS_CSUM_NONE);
+
return skb;
error_free:
}
static int send_frags(struct sk_buff *skb,
- const struct tnl_mutable_config *mutable)
+ int tunnel_hlen)
{
int sent_len;
- int err;
sent_len = 0;
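+	/* Count only inner-packet bytes toward the total that we return;
+	 * the tunnel header is transmit overhead. */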
while (skb) {
struct sk_buff *next = skb->next;
- int frag_len = skb->len - mutable->tunnel_hlen;
+ int frag_len = skb->len - tunnel_hlen;
+ int err;
skb->next = NULL;
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
err = ip_local_out(skb);
- if (likely(net_xmit_eval(err) == 0))
- sent_len += frag_len;
- else {
- skb = next;
- goto free_frags;
- }
-
skb = next;
+ if (unlikely(net_xmit_eval(err)))
+ goto free_frags;
+ sent_len += frag_len;
}
return sent_len;
* dropped so just free the rest. This may help improve the congestion
* that caused the first packet to be dropped.
*/
- tnl_free_linked_skbs(skb);
+ ovs_tnl_free_linked_skbs(skb);
return sent_len;
}
-int tnl_send(struct vport *vport, struct sk_buff *skb)
+int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
-
enum vport_err_type err = VPORT_E_TX_ERROR;
struct rtable *rt;
- struct dst_entry *unattached_dst = NULL;
- struct tnl_cache *cache;
+ struct ovs_key_ipv4_tunnel tun_key;
int sent_len = 0;
- __be16 frag_off;
+ int tunnel_hlen;
+ __be16 frag_off = 0;
+ __be32 daddr;
+ __be32 saddr;
u8 ttl;
- u8 inner_tos;
u8 tos;
/* Validate the protocol headers before we try to use them. */
- if (skb->protocol == htons(ETH_P_8021Q)) {
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ !vlan_tx_tag_present(skb)) {
if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
goto error_free;
}
#endif
- /* ToS */
- if (skb->protocol == htons(ETH_P_IP))
- inner_tos = ip_hdr(skb)->tos;
+ /* If OVS_CB(skb)->tun_key is NULL, point it at the local tun_key here,
+ * and zero it out.
+ */
+ if (!OVS_CB(skb)->tun_key) {
+ memset(&tun_key, 0, sizeof(tun_key));
+ OVS_CB(skb)->tun_key = &tun_key;
+ }
+
+ tunnel_hlen = tnl_vport->tnl_ops->hdr_len(mutable, OVS_CB(skb)->tun_key);
+ if (unlikely(tunnel_hlen < 0)) {
+ err = VPORT_E_TX_DROPPED;
+ goto error_free;
+ }
+ tunnel_hlen += sizeof(struct iphdr);
+
+ if (OVS_CB(skb)->tun_key->ipv4_dst) {
+ daddr = OVS_CB(skb)->tun_key->ipv4_dst;
+ saddr = OVS_CB(skb)->tun_key->ipv4_src;
+ tos = OVS_CB(skb)->tun_key->ipv4_tos;
+ ttl = OVS_CB(skb)->tun_key->ipv4_ttl;
+ } else {
+ u8 inner_tos;
+ daddr = mutable->key.daddr;
+ saddr = mutable->key.saddr;
+
+ if (unlikely(!daddr)) {
+			/* Trying to send a packet from a null port without
+			 * tunnel info? Drop this packet. */
+ err = VPORT_E_TX_DROPPED;
+ goto error_free;
+ }
+
+ /* ToS */
+ if (skb->protocol == htons(ETH_P_IP))
+ inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- else if (skb->protocol == htons(ETH_P_IPV6))
- inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
- else
- inner_tos = 0;
+ else
+ inner_tos = 0;
- if (mutable->port_config.flags & TNL_F_TOS_INHERIT)
- tos = inner_tos;
- else
- tos = mutable->port_config.tos;
+ if (mutable->flags & TNL_F_TOS_INHERIT)
+ tos = inner_tos;
+ else
+ tos = mutable->tos;
+
+ tos = INET_ECN_encapsulate(tos, inner_tos);
- tos = INET_ECN_encapsulate(tos, inner_tos);
+ /* TTL */
+ ttl = mutable->ttl;
+ if (mutable->flags & TNL_F_TTL_INHERIT) {
+ if (skb->protocol == htons(ETH_P_IP))
+ ttl = ip_hdr(skb)->ttl;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ ttl = ipv6_hdr(skb)->hop_limit;
+#endif
+ }
+
+ }
/* Route lookup */
- rt = find_route(vport, mutable, tos, &cache);
- if (unlikely(!rt))
+ rt = find_route(port_key_get_net(&mutable->key), &saddr, daddr,
+ tnl_vport->tnl_ops->ipproto, tos);
+ if (IS_ERR(rt))
goto error_free;
- if (unlikely(!cache))
- unattached_dst = &rt_dst(rt);
/* Reset SKB */
nf_reset(skb);
secpath_reset(skb);
skb_dst_drop(skb);
+ skb_clear_rxhash(skb);
/* Offloading */
- skb = handle_offloads(skb, mutable, rt);
- if (unlikely(IS_ERR(skb)))
- goto error;
+ skb = handle_offloads(skb, mutable, rt, tunnel_hlen);
+ if (IS_ERR(skb)) {
+ skb = NULL;
+ goto err_free_rt;
+ }
/* MTU */
- if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
+ if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off, tunnel_hlen))) {
err = VPORT_E_TX_DROPPED;
- goto error_free;
+ goto err_free_rt;
}
- /*
- * If we are over the MTU, allow the IP stack to handle fragmentation.
- * Fragmentation is a slow path anyways.
- */
- if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
- cache)) {
- unattached_dst = &rt_dst(rt);
- dst_hold(unattached_dst);
- cache = NULL;
- }
-
- /* TTL */
- ttl = mutable->port_config.ttl;
- if (!ttl)
- ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);
-
- if (mutable->port_config.flags & TNL_F_TTL_INHERIT) {
- if (skb->protocol == htons(ETH_P_IP))
- ttl = ip_hdr(skb)->ttl;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- else if (skb->protocol == htons(ETH_P_IPV6))
- ttl = ipv6_hdr(skb)->hop_limit;
-#endif
+ /* TTL Fixup. */
+ if (!OVS_CB(skb)->tun_key->ipv4_dst) {
+ if (!(mutable->flags & TNL_F_TTL_INHERIT)) {
+ if (!ttl)
+ ttl = ip4_dst_hoplimit(&rt_dst(rt));
+ }
}
while (skb) {
struct sk_buff *next_skb = skb->next;
skb->next = NULL;
- if (likely(cache)) {
- skb_push(skb, cache->len);
- memcpy(skb->data, get_cached_header(cache), cache->len);
- skb_reset_mac_header(skb);
- skb_set_network_header(skb, rt_dst(rt).hh->hh_len);
-
- } else {
- skb_push(skb, mutable->tunnel_hlen);
- create_tunnel_header(vport, mutable, rt, skb->data);
- skb_reset_network_header(skb);
-
- if (next_skb)
- skb_dst_set(skb, dst_clone(unattached_dst));
- else {
- skb_dst_set(skb, unattached_dst);
- unattached_dst = NULL;
- }
- }
- skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
+ if (unlikely(vlan_deaccel_tag(skb)))
+ goto next;
+
+ skb_push(skb, tunnel_hlen);
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ if (next_skb)
+ skb_dst_set(skb, dst_clone(&rt_dst(rt)));
+ else
+ skb_dst_set(skb, &rt_dst(rt));
+
+ /* Push IP header. */
iph = ip_hdr(skb);
- iph->tos = tos;
- iph->ttl = ttl;
- iph->frag_off = frag_off;
+ iph->version = 4;
+ iph->ihl = sizeof(struct iphdr) >> 2;
+ iph->protocol = tnl_vport->tnl_ops->ipproto;
+ iph->daddr = daddr;
+ iph->saddr = saddr;
+ iph->tos = tos;
+ iph->ttl = ttl;
+ iph->frag_off = frag_off;
ip_select_ident(iph, &rt_dst(rt), NULL);
- skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
+ /* Push Tunnel header. */
+ skb = tnl_vport->tnl_ops->build_header(vport, mutable,
+ &rt_dst(rt), skb, tunnel_hlen);
if (unlikely(!skb))
goto next;
- if (likely(cache)) {
- int orig_len = skb->len - cache->len;
- struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
-
- skb->protocol = htons(ETH_P_IP);
- iph->tot_len = htons(skb->len - skb_network_offset(skb));
- ip_send_check(iph);
-
- if (cache_vport) {
- OVS_CB(skb)->flow = cache->flow;
- compute_ip_summed(skb, true);
- vport_receive(cache_vport, skb);
- sent_len += orig_len;
- } else {
- int err;
-
- skb->dev = rt_dst(rt).dev;
- err = dev_queue_xmit(skb);
-
- if (likely(net_xmit_eval(err) == 0))
- sent_len += orig_len;
- }
- } else
- sent_len += send_frags(skb, mutable);
+ sent_len += send_frags(skb, tunnel_hlen);
next:
skb = next_skb;
}
if (unlikely(sent_len == 0))
- vport_record_error(vport, VPORT_E_TX_DROPPED);
+ ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
- goto out;
+ return sent_len;
+err_free_rt:
+ ip_rt_put(rt);
error_free:
- tnl_free_linked_skbs(skb);
-error:
- dst_release(unattached_dst);
- vport_record_error(vport, err);
-out:
+ ovs_tnl_free_linked_skbs(skb);
+ ovs_vport_record_error(vport, err);
return sent_len;
}
-static int set_config(const void *config, const struct tnl_ops *tnl_ops,
- const struct vport *cur_vport,
- struct tnl_mutable_config *mutable)
+static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
+ [OVS_TUNNEL_ATTR_FLAGS] = { .type = NLA_U32 },
+ [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
+ [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
+ [OVS_TUNNEL_ATTR_OUT_KEY] = { .type = NLA_U64 },
+ [OVS_TUNNEL_ATTR_IN_KEY] = { .type = NLA_U64 },
+ [OVS_TUNNEL_ATTR_TOS] = { .type = NLA_U8 },
+ [OVS_TUNNEL_ATTR_TTL] = { .type = NLA_U8 },
+};
+
+/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
+ * zeroed. */
+static int tnl_set_config(struct net *net, struct nlattr *options,
+ const struct tnl_ops *tnl_ops,
+ const struct vport *cur_vport,
+ struct tnl_mutable_config *mutable)
{
const struct vport *old_vport;
const struct tnl_mutable_config *old_mutable;
+ struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
+ int err;
- mutable->port_config = *(struct tnl_port_config *)config;
+ port_key_set_net(&mutable->key, net);
+ mutable->key.tunnel_type = tnl_ops->tunnel_type;
+ if (!options)
+ goto out;
- if (mutable->port_config.daddr == 0)
- return -EINVAL;
+ err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
+ if (err)
+ return err;
- if (mutable->port_config.tos != RT_TOS(mutable->port_config.tos))
+ if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
return -EINVAL;
- mutable->tunnel_hlen = tnl_ops->hdr_len(&mutable->port_config);
- if (mutable->tunnel_hlen < 0)
- return mutable->tunnel_hlen;
+ mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
+ mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
- mutable->tunnel_hlen += sizeof(struct iphdr);
+ if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
+ if (ipv4_is_multicast(mutable->key.daddr))
+ return -EINVAL;
+ mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
+ }
- mutable->tunnel_type = tnl_ops->tunnel_type;
- if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
- mutable->tunnel_type |= TNL_T_KEY_MATCH;
- mutable->port_config.in_key = 0;
- } else
- mutable->tunnel_type |= TNL_T_KEY_EXACT;
+ if (a[OVS_TUNNEL_ATTR_TOS]) {
+ mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
+ /* Reject ToS config with ECN bits set. */
+ if (mutable->tos & INET_ECN_MASK)
+ return -EINVAL;
+ }
- old_vport = tnl_find_port(mutable->port_config.saddr,
- mutable->port_config.daddr,
- mutable->port_config.in_key,
- mutable->tunnel_type,
- &old_mutable);
+ if (a[OVS_TUNNEL_ATTR_TTL])
+ mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);
+ if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
+ mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
+ mutable->flags |= TNL_F_IN_KEY_MATCH;
+ } else {
+ mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
+ mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
+ }
+
+ if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
+ mutable->flags |= TNL_F_OUT_KEY_ACTION;
+ else
+ mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);
+
+ mutable->mlink = 0;
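+	/* For a multicast remote address, join the group on the device that
+	 * the route selects and remember its ifindex so free_mutable_rtnl()
+	 * can leave the group when the config is released. */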
+ if (ipv4_is_multicast(mutable->key.daddr)) {
+ struct net_device *dev;
+ struct rtable *rt;
+ __be32 saddr = mutable->key.saddr;
+
+ rt = find_route(port_key_get_net(&mutable->key),
+ &saddr, mutable->key.daddr,
+ tnl_ops->ipproto, mutable->tos);
+ if (IS_ERR(rt))
+ return -EADDRNOTAVAIL;
+ dev = rt_dst(rt).dev;
+ ip_rt_put(rt);
+ if (__in_dev_get_rtnl(dev) == NULL)
+ return -EADDRNOTAVAIL;
+ mutable->mlink = dev->ifindex;
+ ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
+ }
+
+out:
+ old_vport = port_table_lookup(&mutable->key, &old_mutable);
if (old_vport && old_vport != cur_vport)
return -EEXIST;
- if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
- mutable->port_config.out_key = 0;
-
return 0;
}
-struct vport *tnl_create(const struct vport_parms *parms,
- const struct vport_ops *vport_ops,
- const struct tnl_ops *tnl_ops)
+struct vport *ovs_tnl_create(const struct vport_parms *parms,
+ const struct vport_ops *vport_ops,
+ const struct tnl_ops *tnl_ops)
{
struct vport *vport;
struct tnl_vport *tnl_vport;
+ struct tnl_mutable_config *mutable;
int initial_frag_id;
int err;
- vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
+ vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
strcpy(tnl_vport->name, parms->name);
tnl_vport->tnl_ops = tnl_ops;
- tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
- if (!tnl_vport->mutable) {
+ mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
+ if (!mutable) {
err = -ENOMEM;
goto error_free_vport;
}
- vport_gen_rand_ether_addr(tnl_vport->mutable->eth_addr);
- tnl_vport->mutable->mtu = ETH_DATA_LEN;
+ random_ether_addr(mutable->eth_addr);
get_random_bytes(&initial_frag_id, sizeof(int));
atomic_set(&tnl_vport->frag_id, initial_frag_id);
- err = set_config(parms->config, tnl_ops, NULL, tnl_vport->mutable);
+ err = tnl_set_config(ovs_dp_get_net(parms->dp), parms->options, tnl_ops,
+ NULL, mutable);
if (err)
goto error_free_mutable;
- spin_lock_init(&tnl_vport->cache_lock);
-
-#ifdef NEED_CACHE_TIMEOUT
- tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
- (net_random() % (MAX_CACHE_EXP / 2));
-#endif
-
- err = add_port(vport);
- if (err)
- goto error_free_mutable;
+ rcu_assign_pointer(tnl_vport->mutable, mutable);
+ port_table_add_port(vport);
return vport;
error_free_mutable:
- kfree(tnl_vport->mutable);
+ free_mutable_rtnl(mutable);
+ kfree(mutable);
error_free_vport:
- vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
-int tnl_modify(struct vport *vport, struct odp_port *port)
+int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
+ const struct tnl_mutable_config *old_mutable;
struct tnl_mutable_config *mutable;
int err;
- mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
+ old_mutable = rtnl_dereference(tnl_vport->mutable);
+ if (!old_mutable->key.daddr)
+ return -EINVAL;
+
+ mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
if (!mutable) {
err = -ENOMEM;
goto error;
}
- err = set_config(port->config, tnl_vport->tnl_ops, vport, mutable);
- if (err)
- goto error_free;
-
- mutable->seq++;
+ /* Copy fields whose values should be retained. */
+ mutable->seq = old_mutable->seq + 1;
+ memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);
- err = move_port(vport, mutable);
+ /* Parse the others configured by userspace. */
+ err = tnl_set_config(ovs_dp_get_net(vport->dp), options, tnl_vport->tnl_ops,
+ vport, mutable);
if (err)
goto error_free;
+ if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
+ port_table_move_port(vport, mutable);
+ else
+ assign_config_rcu(vport, mutable);
+
return 0;
error_free:
+ free_mutable_rtnl(mutable);
kfree(mutable);
error:
return err;
}
-static void free_port_rcu(struct rcu_head *rcu)
+int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
- struct tnl_vport *tnl_vport = container_of(rcu, struct tnl_vport, rcu);
+ const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
+ const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
+
+ if (nla_put_u32(skb, OVS_TUNNEL_ATTR_FLAGS,
+ mutable->flags & TNL_F_PUBLIC) ||
+ nla_put_be32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr))
+ goto nla_put_failure;
+
+ if (!(mutable->flags & TNL_F_IN_KEY_MATCH) &&
+ nla_put_be64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key))
+ goto nla_put_failure;
+ if (!(mutable->flags & TNL_F_OUT_KEY_ACTION) &&
+ nla_put_be64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key))
+ goto nla_put_failure;
+ if (mutable->key.saddr &&
+ nla_put_be32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr))
+ goto nla_put_failure;
+ if (mutable->tos && nla_put_u8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos))
+ goto nla_put_failure;
+ if (mutable->ttl && nla_put_u8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl))
+ goto nla_put_failure;
- spin_lock_bh(&tnl_vport->cache_lock);
- free_cache(tnl_vport->cache);
- spin_unlock_bh(&tnl_vport->cache_lock);
+ return 0;
- kfree(tnl_vport->mutable);
- vport_free(tnl_vport_to_vport(tnl_vport));
+nla_put_failure:
+ return -EMSGSIZE;
}
-int tnl_destroy(struct vport *vport)
+static void free_port_rcu(struct rcu_head *rcu)
{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- const struct tnl_mutable_config *old_mutable;
-
- if (vport == tnl_find_port(tnl_vport->mutable->port_config.saddr,
- tnl_vport->mutable->port_config.daddr,
- tnl_vport->mutable->port_config.in_key,
- tnl_vport->mutable->tunnel_type,
- &old_mutable))
- del_port(vport);
-
- call_rcu(&tnl_vport->rcu, free_port_rcu);
+ struct tnl_vport *tnl_vport = container_of(rcu,
+ struct tnl_vport, rcu);
- return 0;
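+	/* No readers can remain by the time the RCU callback runs, so it is
+	 * safe to cast away the __rcu annotation with __force. */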
+	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
+ ovs_vport_free(tnl_vport_to_vport(tnl_vport));
}
-int tnl_set_mtu(struct vport *vport, int mtu)
+void ovs_tnl_destroy(struct vport *vport)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *mutable;
- mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
- if (!mutable)
- return -ENOMEM;
-
- mutable->mtu = mtu;
- assign_config_rcu(vport, mutable);
-
- return 0;
+ mutable = rtnl_dereference(tnl_vport->mutable);
+ port_table_remove_port(vport);
+ free_mutable_rtnl(mutable);
+ call_rcu(&tnl_vport->rcu, free_port_rcu);
}
-int tnl_set_addr(struct vport *vport, const unsigned char *addr)
+int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_mutable_config *mutable;
+ struct tnl_mutable_config *old_mutable, *mutable;
- mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
+ old_mutable = rtnl_dereference(tnl_vport->mutable);
+ mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
if (!mutable)
return -ENOMEM;
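+	/* The copy inherits mlink; clear it on the old config so that
+	 * free_mutable_rtnl() does not drop our multicast membership. */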
+ old_mutable->mlink = 0;
+
memcpy(mutable->eth_addr, addr, ETH_ALEN);
assign_config_rcu(vport, mutable);
return 0;
}
-const char *tnl_get_name(const struct vport *vport)
+const char *ovs_tnl_get_name(const struct vport *vport)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
return tnl_vport->name;
}
-const unsigned char *tnl_get_addr(const struct vport *vport)
-{
- const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- return rcu_dereference(tnl_vport->mutable)->eth_addr;
-}
-
-int tnl_get_mtu(const struct vport *vport)
+const unsigned char *ovs_tnl_get_addr(const struct vport *vport)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- return rcu_dereference(tnl_vport->mutable)->mtu;
+ return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}
-void tnl_free_linked_skbs(struct sk_buff *skb)
+void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
{
- if (unlikely(!skb))
- return;
-
while (skb) {
struct sk_buff *next = skb->next;
kfree_skb(skb);
skb = next;
}
}
+
+int ovs_tnl_init(void)
+{
+ int i;
+
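+	/* A fixed, power-of-two array of RCU hlist heads; see find_bucket(). */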
+	port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
+			     GFP_KERNEL);
+ if (!port_table)
+ return -ENOMEM;
+
+ for (i = 0; i < PORT_TABLE_SIZE; i++)
+ INIT_HLIST_HEAD(&port_table[i]);
+
+ return 0;
+}
+
+void ovs_tnl_exit(void)
+{
+ kfree(port_table);
+}