#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
+#endif
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include "openvswitch/gre.h"
#include "table.h"
#include "vport.h"
+#include "vport-generic.h"
/* The absolute minimum fragment size. Note that there are many other
* definitions of the minimum MTU. */
/* The GRE header is composed of a series of sections: a base and then a variable
 * number of options. */
#define GRE_HEADER_SECTION 4
+struct gre_base_hdr {
+ __be16 flags;
+ __be16 protocol;
+};
+
struct mutable_config {
struct rcu_head rcu;
};
struct gre_vport {
+ struct rcu_head rcu;
struct tbl_node tbl_node;
char name[IFNAMSIZ];
struct mutable_config *mutable;
};
-struct vport_ops gre_vport_ops;
-
/* Protected by RCU. */
static struct tbl *port_table;
static unsigned int local_remote_ports;
static unsigned int remote_ports;
-static inline struct gre_vport *
-gre_vport_priv(const struct vport *vport)
+static inline struct gre_vport *gre_vport_priv(const struct vport *vport)
{
return vport_priv(vport);
}
-static inline struct vport *
-gre_vport_to_vport(const struct gre_vport *gre_vport)
+static inline struct vport *gre_vport_to_vport(const struct gre_vport *gre_vport)
{
return vport_from_priv(gre_vport);
}
-static inline struct gre_vport *
-gre_vport_table_cast(const struct tbl_node *node)
+static inline struct gre_vport *gre_vport_table_cast(const struct tbl_node *node)
{
return container_of(node, struct gre_vport, tbl_node);
}
/* RCU callback. */
-static void
-free_config(struct rcu_head *rcu)
+static void free_config(struct rcu_head *rcu)
{
struct mutable_config *c = container_of(rcu, struct mutable_config, rcu);
kfree(c);
}
-static void
-assign_config_rcu(struct vport *vport, struct mutable_config *new_config)
+static void assign_config_rcu(struct vport *vport,
+ struct mutable_config *new_config)
{
struct gre_vport *gre_vport = gre_vport_priv(vport);
struct mutable_config *old_config;
call_rcu(&old_config->rcu, free_config);
}
-static unsigned int *
-find_port_pool(const struct mutable_config *mutable)
+static unsigned int *find_port_pool(const struct mutable_config *mutable)
{
if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH) {
if (mutable->port_config.saddr)
/* Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison. */
-static int
-port_cmp(const struct tbl_node *node, void *target)
+static int port_cmp(const struct tbl_node *node, void *target)
{
const struct gre_vport *gre_vport = gre_vport_table_cast(node);
struct port_lookup_key *lookup = target;
lookup->mutable->port_config.saddr == lookup->vals[LOOKUP_SADDR];
}
-static u32
-port_hash(struct port_lookup_key *lookup)
+static u32 port_hash(struct port_lookup_key *lookup)
{
return jhash2(lookup->vals, ARRAY_SIZE(lookup->vals), 0);
}
-static int
-add_port(struct vport *vport)
+static int add_port(struct vport *vport)
{
struct gre_vport *gre_vport = gre_vport_priv(vport);
struct port_lookup_key lookup;
return 0;
}
-static int
-del_port(struct vport *vport)
+static int del_port(struct vport *vport)
{
struct gre_vport *gre_vport = gre_vport_priv(vport);
int err;
#define FIND_PORT_MATCH (1 << 1)
#define FIND_PORT_ANY (FIND_PORT_KEY | FIND_PORT_MATCH)
-static struct vport *
-find_port(__be32 saddr, __be32 daddr, __be32 key, int port_type,
- const struct mutable_config **mutable)
+static struct vport *find_port(__be32 saddr, __be32 daddr, __be32 key,
+ int port_type,
+ const struct mutable_config **mutable)
{
struct port_lookup_key lookup;
struct tbl *table = rcu_dereference(port_table);
return gre_vport_to_vport(gre_vport_table_cast(tbl_node));
}
-static bool
-check_ipv4_address(__be32 addr)
+static bool check_ipv4_address(__be32 addr)
{
if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
|| ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
return true;
}
-static bool
-ipv4_should_icmp(struct sk_buff *skb)
+static bool ipv4_should_icmp(struct sk_buff *skb)
{
struct iphdr *old_iph = ip_hdr(skb);
return true;
}
-static void
-ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
- unsigned int mtu, unsigned int payload_length)
+static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
+ unsigned int mtu, unsigned int payload_length)
{
struct iphdr *iph, *old_iph = ip_hdr(skb);
struct icmphdr *icmph;
icmph->checksum = csum_fold(nskb->csum);
}
-static bool
-ipv6_should_icmp(struct sk_buff *skb)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static bool ipv6_should_icmp(struct sk_buff *skb)
{
struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
int addr_type;
return true;
}
-static void
-ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb, unsigned int mtu,
- unsigned int payload_length)
+static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
+ unsigned int mtu, unsigned int payload_length)
{
struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
struct icmp6hdr *icmp6h;
+ payload_length,
ipv6h->nexthdr, nskb->csum);
}
+#endif /* IPv6 */
-static bool
-send_frag_needed(struct vport *vport, const struct mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
+static bool send_frag_needed(struct vport *vport,
+ const struct mutable_config *mutable,
+ struct sk_buff *skb, unsigned int mtu,
+ __be32 flow_key)
{
unsigned int eth_hdr_len = ETH_HLEN;
- unsigned int total_length, header_length, payload_length;
+ unsigned int total_length = 0, header_length = 0, payload_length;
struct ethhdr *eh, *old_eh = eth_hdr(skb);
struct sk_buff *nskb;
if (!ipv4_should_icmp(skb))
return true;
- } else {
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)
return false;
if (!ipv6_should_icmp(skb))
return true;
}
+#endif
+ else
+ return false;
/* Allocate */
if (old_eh->h_proto == htons(ETH_P_8021Q))
header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
total_length = min_t(unsigned int, header_length +
payload_length, 576);
- } else {
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else {
header_length = sizeof(struct ipv6hdr) +
sizeof(struct icmp6hdr);
total_length = min_t(unsigned int, header_length +
payload_length, IPV6_MIN_MTU);
}
+#endif
+
total_length = min(total_length, mutable->mtu);
payload_length = total_length - header_length;
/* Protocol */
if (skb->protocol == htons(ETH_P_IP))
ipv4_build_icmp(skb, nskb, mtu, payload_length);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else
ipv6_build_icmp(skb, nskb, mtu, payload_length);
+#endif
/* Assume that flow based keys are symmetric with respect to input
* and output and use the key that we were going to put on the
return true;
}
-static struct sk_buff *
-check_headroom(struct sk_buff *skb, int headroom)
+static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
{
- if (skb_headroom(skb) < headroom ||
- (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
- struct sk_buff *nskb = skb_realloc_headroom(skb, headroom);
+ if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
+ struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
if (!nskb) {
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
return skb;
}
-static void
-create_gre_header(struct sk_buff *skb, const struct mutable_config *mutable)
+static void create_gre_header(struct sk_buff *skb,
+ const struct mutable_config *mutable)
{
struct iphdr *iph = ip_hdr(skb);
- __be16 *flags = (__be16 *)(iph + 1);
- __be16 *protocol = flags + 1;
+ struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
__be32 *options = (__be32 *)((u8 *)iph + mutable->tunnel_hlen
- GRE_HEADER_SECTION);
- *protocol = htons(ETH_P_TEB);
- *flags = 0;
+ greh->protocol = htons(ETH_P_TEB);
+ greh->flags = 0;
/* Work backwards over the options so the checksum is last. */
if (mutable->port_config.out_key ||
mutable->port_config.flags & GRE_F_OUT_KEY_ACTION) {
- *flags |= GRE_KEY;
+ greh->flags |= GRE_KEY;
if (mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
*options = OVS_CB(skb)->tun_id;
}
if (mutable->port_config.flags & GRE_F_OUT_CSUM) {
- *flags |= GRE_CSUM;
+ greh->flags |= GRE_CSUM;
*options = 0;
*(__sum16 *)options = csum_fold(skb_checksum(skb,
}
}
-static int
-check_checksum(struct sk_buff *skb)
+static int check_checksum(struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
__be16 flags = *(__be16 *)(iph + 1);
return (csum == 0);
}
-static int
-parse_gre_header(struct iphdr *iph, __be16 *flags, __be32 *key)
+static int parse_gre_header(struct iphdr *iph, __be16 *flags, __be32 *key)
{
/* IP and ICMP protocol handlers check that the IHL is valid. */
- __be16 *flagsp = (__be16 *)((u8 *)iph + (iph->ihl << 2));
- __be16 *protocol = flagsp + 1;
- __be32 *options = (__be32 *)(protocol + 1);
+ struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
+ __be32 *options = (__be32 *)(greh + 1);
int hdr_len;
- *flags = *flagsp;
+ *flags = greh->flags;
- if (*flags & (GRE_VERSION | GRE_ROUTING))
+ if (greh->flags & (GRE_VERSION | GRE_ROUTING))
return -EINVAL;
- if (*protocol != htons(ETH_P_TEB))
+ if (greh->protocol != htons(ETH_P_TEB))
return -EINVAL;
hdr_len = GRE_HEADER_SECTION;
- if (*flags & GRE_CSUM) {
+ if (greh->flags & GRE_CSUM) {
hdr_len += GRE_HEADER_SECTION;
options++;
}
- if (*flags & GRE_KEY) {
+ if (greh->flags & GRE_KEY) {
hdr_len += GRE_HEADER_SECTION;
*key = *options;
} else
*key = 0;
- if (*flags & GRE_SEQ)
+ if (greh->flags & GRE_SEQ)
hdr_len += GRE_HEADER_SECTION;
return hdr_len;
}
-static inline u8
-ecn_encapsulate(u8 tos, struct sk_buff *skb)
+static inline u8 ecn_encapsulate(u8 tos, struct sk_buff *skb)
{
u8 inner;
if (skb->protocol == htons(ETH_P_IP))
inner = ((struct iphdr *)skb_network_header(skb))->tos;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
inner = ipv6_get_dsfield((struct ipv6hdr *)skb_network_header(skb));
+#endif
else
inner = 0;
return INET_ECN_encapsulate(tos, inner);
}
-static inline void
-ecn_decapsulate(u8 tos, struct sk_buff *skb)
+static inline void ecn_decapsulate(u8 tos, struct sk_buff *skb)
{
if (INET_ECN_is_ce(tos)) {
__be16 protocol = skb->protocol;
return;
IP_ECN_set_ce((struct iphdr *)(nw_header + skb->data));
- } else if (protocol == htons(ETH_P_IPV6)) {
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if (protocol == htons(ETH_P_IPV6)) {
if (unlikely(!pskb_may_pull(skb, nw_header
+ sizeof(struct ipv6hdr))))
return;
IP6_ECN_set_ce((struct ipv6hdr *)(nw_header
+ skb->data));
}
+#endif
}
}
-static struct sk_buff *
-handle_gso(struct sk_buff *skb)
+static struct sk_buff *handle_gso(struct sk_buff *skb)
{
if (skb_is_gso(skb)) {
- struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG);
+ struct sk_buff *nskb = skb_gso_segment(skb, 0);
dev_kfree_skb(skb);
return nskb;
return skb;
}
-static int
-handle_csum_offload(struct sk_buff *skb)
+static int handle_csum_offload(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_PARTIAL)
return skb_checksum_help(skb);
}
/* Called with rcu_read_lock. */
-static void
-gre_err(struct sk_buff *skb, u32 info)
+static void gre_err(struct sk_buff *skb, u32 info)
{
struct vport *vport;
const struct mutable_config *mutable;
if (skb->protocol == htons(ETH_P_IP))
tot_hdr_len += sizeof(struct iphdr);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
tot_hdr_len += sizeof(struct ipv6hdr);
+#endif
else
goto out;
goto out;
}
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU) {
unsigned int packet_length = sizeof(struct ipv6hdr) +
ntohs(ipv6_hdr(skb)->payload_len);
goto out;
}
}
+#endif
__pskb_pull(skb, tunnel_hdr_len);
send_frag_needed(vport, mutable, skb, mtu, key);
}
/* Called with rcu_read_lock. */
-static int
-gre_rcv(struct sk_buff *skb)
+static int gre_rcv(struct sk_buff *skb)
{
struct vport *vport;
const struct mutable_config *mutable;
return 0;
}
-static int
-build_packet(struct vport *vport, const struct mutable_config *mutable,
- struct iphdr *iph, struct rtable *rt, int max_headroom, int mtu,
- struct sk_buff *skb)
+static int build_packet(struct vport *vport, const struct mutable_config *mutable,
+ struct iphdr *iph, struct rtable *rt, int max_headroom,
+ int mtu, struct sk_buff *skb)
{
int err;
struct iphdr *new_iph;
goto error_free;
}
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
unsigned int packet_length = skb->len - ETH_HLEN
- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
goto error_free;
}
}
+#endif
skb_reset_transport_header(skb);
new_iph = (struct iphdr *)skb_push(skb, mutable->tunnel_hlen);
return 0;
}
-static int
-gre_send(struct vport *vport, struct sk_buff *skb)
+static int gre_send(struct vport *vport, struct sk_buff *skb)
{
struct gre_vport *gre_vport = gre_vport_priv(vport);
const struct mutable_config *mutable = rcu_dereference(gre_vport->mutable);
struct iphdr *old_iph;
- struct ipv6hdr *old_ipv6h;
int orig_len;
struct iphdr iph;
struct rtable *rt;
if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
+ sizeof(struct iphdr) - skb->data)))
skb->protocol = 0;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
+ sizeof(struct ipv6hdr) - skb->data)))
skb->protocol = 0;
}
-
+#endif
old_iph = ip_hdr(skb);
- old_ipv6h = ipv6_hdr(skb);
iph.tos = mutable->port_config.tos;
if (mutable->port_config.flags & GRE_F_TOS_INHERIT) {
if (skb->protocol == htons(ETH_P_IP))
iph.tos = old_iph->tos;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
iph.tos = ipv6_get_dsfield(ipv6_hdr(skb));
+#endif
}
iph.tos = ecn_encapsulate(iph.tos, skb);
if (mutable->port_config.flags & GRE_F_TTL_INHERIT) {
if (skb->protocol == htons(ETH_P_IP))
iph.ttl = old_iph->ttl;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
- iph.ttl = old_ipv6h->hop_limit;
+ iph.ttl = ipv6_hdr(skb)->hop_limit;
+#endif
}
if (!iph.ttl)
iph.ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
if (skb->protocol == htons(ETH_P_IP)) {
iph.frag_off |= old_iph->frag_off & htons(IP_DF);
mtu = max(mtu, IP_MIN_MTU);
-
- } else if (skb->protocol == htons(ETH_P_IPV6))
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if (skb->protocol == htons(ETH_P_IPV6))
mtu = max(mtu, IPV6_MIN_MTU);
+#endif
iph.version = 4;
iph.ihl = sizeof(struct iphdr) >> 2;
}
forward_ip_summed(skb);
- vswitch_skb_checksum_setup(skb);
+
+ if (unlikely(vswitch_skb_checksum_setup(skb)))
+ goto error_free;
skb = handle_gso(skb);
if (unlikely(IS_ERR(skb))) {
.err_handler = gre_err,
};
-static int
-gre_init(void)
+static int gre_init(void)
{
int err;
return err;
}
-static void
-gre_exit(void)
+static void gre_exit(void)
{
tbl_destroy(port_table, NULL);
inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}
-static int
-set_config(const struct vport *cur_vport, struct mutable_config *mutable,
- const void __user *uconfig)
+static int set_config(const struct vport *cur_vport,
+ struct mutable_config *mutable, const void __user *uconfig)
{
const struct vport *old_vport;
const struct mutable_config *old_mutable;
return 0;
}
-static struct vport *
-gre_create(const char *name, const void __user *config)
+static struct vport *gre_create(const char *name, const void __user *config)
{
struct vport *vport;
struct gre_vport *gre_vport;
goto error_free_vport;
}
- vport_gen_ether_addr(gre_vport->mutable->eth_addr);
+ vport_gen_rand_ether_addr(gre_vport->mutable->eth_addr);
gre_vport->mutable->mtu = ETH_DATA_LEN;
err = set_config(NULL, gre_vport->mutable, config);
return ERR_PTR(err);
}
-static int
-gre_modify(struct vport *vport, const void __user *config)
+static int gre_modify(struct vport *vport, const void __user *config)
{
struct gre_vport *gre_vport = gre_vport_priv(vport);
struct mutable_config *mutable;
return err;
}
-static int
-gre_destroy(struct vport *vport)
+static void free_port(struct rcu_head *rcu)
+{
+ struct gre_vport *gre_vport = container_of(rcu, struct gre_vport, rcu);
+
+ kfree(gre_vport->mutable);
+ vport_free(gre_vport_to_vport(gre_vport));
+}
+
+static int gre_destroy(struct vport *vport)
{
struct gre_vport *gre_vport = gre_vport_priv(vport);
int port_type;
gre_vport->mutable->port_config.in_key, port_type, &old_mutable))
del_port(vport);
- kfree(gre_vport->mutable);
- vport_free(vport);
+ call_rcu(&gre_vport->rcu, free_port);
return 0;
}
-static int
-gre_set_mtu(struct vport *vport, int mtu)
+static int gre_set_mtu(struct vport *vport, int mtu)
{
struct gre_vport *gre_vport = gre_vport_priv(vport);
struct mutable_config *mutable;
- struct dp_port *dp_port;
mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
if (!mutable)
mutable->mtu = mtu;
assign_config_rcu(vport, mutable);
- dp_port = vport_get_dp_port(vport);
- if (dp_port)
- set_internal_devs_mtu(dp_port->dp);
-
return 0;
}
-static int
-gre_set_addr(struct vport *vport, const unsigned char *addr)
+static int gre_set_addr(struct vport *vport, const unsigned char *addr)
{
struct gre_vport *gre_vport = gre_vport_priv(vport);
struct mutable_config *mutable;
}
-static const char *
-gre_get_name(const struct vport *vport)
+static const char *gre_get_name(const struct vport *vport)
{
const struct gre_vport *gre_vport = gre_vport_priv(vport);
return gre_vport->name;
}
-static const unsigned char *
-gre_get_addr(const struct vport *vport)
+static const unsigned char *gre_get_addr(const struct vport *vport)
{
const struct gre_vport *gre_vport = gre_vport_priv(vport);
return rcu_dereference(gre_vport->mutable)->eth_addr;
}
-static unsigned
-gre_get_dev_flags(const struct vport *vport)
-{
- return IFF_UP | IFF_RUNNING | IFF_LOWER_UP;
-}
-
-static int
-gre_is_running(const struct vport *vport)
-{
- return 1;
-}
-
-static unsigned char
-gre_get_operstate(const struct vport *vport)
-{
- return IF_OPER_UP;
-}
-
-static int
-gre_get_mtu(const struct vport *vport)
+static int gre_get_mtu(const struct vport *vport)
{
const struct gre_vport *gre_vport = gre_vport_priv(vport);
return rcu_dereference(gre_vport->mutable)->mtu;
.set_addr = gre_set_addr,
.get_name = gre_get_name,
.get_addr = gre_get_addr,
- .get_dev_flags = gre_get_dev_flags,
- .is_running = gre_is_running,
- .get_operstate = gre_get_operstate,
+ .get_dev_flags = vport_gen_get_dev_flags,
+ .is_running = vport_gen_is_running,
+ .get_operstate = vport_gen_get_operstate,
.get_mtu = gre_get_mtu,
.send = gre_send,
};