/*
- * Copyright (c) 2007, 2008, 2009 Nicira Networks.
+ * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
* Distributed under the terms of the GNU GPL version 2.
*
* Significant portions of this file may be copied from parts of the Linux
EXPORT_SYMBOL(dp_ioctl_hook);
/* Datapaths. Protected on the read side by rcu_read_lock, on the write side
- * by dp_mutex. dp_mutex is almost completely redundant with genl_mutex
- * maintained by the Generic Netlink code, but the timeout path needs mutual
- * exclusion too.
+ * by dp_mutex.
*
* dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
* lock first.
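To make the documented ordering concrete, a writer that needs both locks would look roughly like the sketch below (an illustration only, not code from this patch; it assumes dp_mutex is the mutex declared in this file):

    rtnl_lock();                  /* Take the RTNL lock first. */
    mutex_lock(&dp_mutex);        /* dp_mutex nests inside RTNL. */
    /* ... modify datapath state protected by dp_mutex ... */
    mutex_unlock(&dp_mutex);      /* Release in the reverse order. */
    rtnl_unlock();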
}
EXPORT_SYMBOL_GPL(get_dp);
-struct datapath *get_dp_locked(int dp_idx)
+static struct datapath *get_dp_locked(int dp_idx)
{
struct datapath *dp;
kfree(dp);
}
-struct kobj_type dp_ktype = {
+static struct kobj_type dp_ktype = {
.release = release_dp
};
if (!dp->table)
goto err_free_dp;
- /* Setup our datapath device */
+ /* Set up our datapath device. */
dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
err = PTR_ERR(dp_dev);
if (IS_ERR(dp_dev))
kfree(p);
}
-struct kobj_type brport_ktype = {
+static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
.sysfs_ops = &brport_sysfs_ops,
#endif
p->port_no = port_no;
p->dp = dp;
p->dev = dev;
+ atomic_set(&p->sflow_pool, 0);
if (!is_dp_dev(dev))
rcu_assign_pointer(dev->br_port, p);
else {
if (err)
goto out_put;
+ set_dp_devs_mtu(dp, dev);
dp_sysfs_add_if(dp->ports[port_no]);
- err = __put_user(port_no, &port.port);
+ err = __put_user(port_no, &portp->port);
out_put:
dev_put(dev);
#error
#endif
-#if defined(CONFIG_XEN) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
-/* This code is copied verbatim from net/dev/core.c in Xen's
- * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644. We can't call those functions
- * directly because they aren't exported. */
+#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
+/* This code is based on a skb_checksum_setup from net/dev/core.c from a
+ * combination of Lenny's 2.6.26 Xen kernel and Xen's
+ * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644. We can't call this function
+ * directly because it isn't exported in all versions. */
static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
{
if (ptr < (void *)skb->tail)
int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
- if (skb->proto_csum_blank) {
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
- if (!skb_pull_up_to(skb, skb->nh.iph + 1))
- goto out;
- skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
- switch (skb->nh.iph->protocol) {
- case IPPROTO_TCP:
- skb->csum = offsetof(struct tcphdr, check);
- break;
- case IPPROTO_UDP:
- skb->csum = offsetof(struct udphdr, check);
- break;
- default:
- if (net_ratelimit())
- printk(KERN_ERR "Attempting to checksum a non-"
- "TCP/UDP packet, dropping a protocol"
- " %d packet", skb->nh.iph->protocol);
- goto out;
- }
- if (!skb_pull_up_to(skb, skb->h.raw + skb->csum + 2))
- goto out;
- skb->ip_summed = CHECKSUM_HW;
- skb->proto_csum_blank = 0;
+ struct iphdr *iph;
+ unsigned char *th;
+ int err = -EPROTO;
+ __u16 csum_start, csum_offset;
+
+ if (!skb->proto_csum_blank)
+ return 0;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ goto out;
+
+ /* Pull up the full IP header so iph->ihl and iph->protocol below read
+ * linear data. */
+ if (!skb_pull_up_to(skb, skb_network_header(skb) + sizeof(struct iphdr)))
+ goto out;
+
+ iph = ip_hdr(skb);
+ th = skb_network_header(skb) + 4 * iph->ihl;
+
+ csum_start = th - skb->head;
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ csum_offset = offsetof(struct tcphdr, check);
+ break;
+ case IPPROTO_UDP:
+ csum_offset = offsetof(struct udphdr, check);
+ break;
+ default:
+ if (net_ratelimit())
+ printk(KERN_ERR "Attempting to checksum a non-"
+ "TCP/UDP packet, dropping a protocol"
+ " %d packet", iph->protocol);
+ goto out;
}
- return 0;
+
+ if (!skb_pull_up_to(skb, th + csum_offset + 2))
+ goto out;
+
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->proto_csum_blank = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ skb->csum_start = csum_start;
+ skb->csum_offset = csum_offset;
+#else
+ skb_set_transport_header(skb, csum_start - skb_headroom(skb));
+ skb->csum = csum_offset;
+#endif
+
+ err = 0;
+
out:
- return -EPROTO;
+ return err;
}
+#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */
+
+/* Types of checksums that we can receive (these all refer to L4 checksums):
+ * 1. CHECKSUM_NONE: The device did not compute a checksum; the packet carries
+ * a full (though not verified) checksum, but skb->csum does not hold it.
+ * Packets from the bridge local port will also have this type.
+ * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Produced by a device that computes
+ * checksums, and also by the GRE module. This is the same as CHECKSUM_NONE,
+ * except that it has a valid skb->csum. Importantly, both contain a full
+ * checksum (not verified) in the packet itself. The only difference is that
+ * if the packet gets to L4 processing on this machine (not in DomU) we won't
+ * have to recompute the checksum to verify. Most hardware devices do not
+ * produce packets with this type, even if they support receive checksum
+ * offloading (they produce type #5).
+ * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without a full checksum; one must
+ * be computed if the packet is sent off box. Unfortunately on earlier kernels,
+ * this case is impossible to distinguish from #2, despite having opposite
+ * meanings. Xen adds an extra field on earlier kernels (see #4) in order
+ * to distinguish the different states. The only real user of this type
+ * with bridging is Xen (on later kernels).
+ * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
+ * generated locally by a Xen DomU and has a partial checksum. If it is
+ * handled on this machine (Dom0 or DomU), then the checksum will not be
+ * computed. If it goes off box, the checksum in the packet needs to be
+ * completed. Calling skb_checksum_setup converts this to CHECKSUM_HW
+ * (CHECKSUM_PARTIAL) so that the checksum can be completed. In later
+ * kernels, this combination is replaced with CHECKSUM_PARTIAL.
+ * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
+ * full checksum or using a protocol without a checksum. skb->csum is
+ * undefined. This is common from devices with receive checksum
+ * offloading. This is somewhat similar to CHECKSUM_NONE, except that
+ * nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
+ *
+ * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
+ * both defined as CHECKSUM_HW. Normally the meaning of CHECKSUM_HW is clear
+ * based on whether it is on the transmit or receive path. After the datapath
+ * it will be interpreted as CHECKSUM_PARTIAL. If the packet already has a
+ * checksum, we will panic. Since we can receive packets with checksums, we
+ * assume that all CHECKSUM_HW packets have checksums and map them to
+ * CHECKSUM_NONE, which has a similar meaning (it is only different if the
+ * packet is processed by the local IP stack, in which case it will need to
+ * be reverified). If we receive a packet with CHECKSUM_HW that really means
+ * CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there
+ * shouldn't be any devices that do this with bridging.
+ *
+ * The bridge has similar behavior and this function closely resembles
+ * skb_forward_csum(). It is slightly different because we are only concerned
+ * with bridging, not other types of forwarding, and can get away with
+ * slightly more optimal behavior. */
+void
+forward_ip_summed(struct sk_buff *skb)
+{
+#ifdef CHECKSUM_HW
+ if (skb->ip_summed == CHECKSUM_HW)
+ skb->ip_summed = CHECKSUM_NONE;
+#endif
+}
+
+/* Append each packet in the 'skb' list to 'queue'. There will be only one
+ * packet unless we broke up a GSO packet. */
+static int
+queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
+ int queue_no, u32 arg)
+{
+ struct sk_buff *nskb;
+ int port_no;
+ int err;
+
+ port_no = ODPP_LOCAL;
+ if (skb->dev) {
+ if (skb->dev->br_port)
+ port_no = skb->dev->br_port->port_no;
+ else if (is_dp_dev(skb->dev))
+ port_no = dp_dev_priv(skb->dev)->port_no;
+ }
+
+ do {
+ struct odp_msg *header;
+
+ nskb = skb->next;
+ skb->next = NULL;
+
+ /* If a checksum-deferred packet is forwarded to the
+ * controller, correct the pointers and checksum. This happens
+ * on a regular basis only on Xen, on which VMs can pass up
+ * packets that do not have their checksum computed.
+ */
+ err = vswitch_skb_checksum_setup(skb);
+ if (err)
+ goto err_kfree_skbs;
+#ifndef CHECKSUM_HW
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ /* Until 2.6.22, the start of the transport header was
+ * also the start of data to be checksummed. Linux
+ * 2.6.22 introduced the csum_start field for this
+ * purpose, but we should point the transport header to
+ * it anyway for backward compatibility, as
+ * dev_queue_xmit() does even in 2.6.28. */
+ skb_set_transport_header(skb, skb->csum_start -
+ skb_headroom(skb));
+#endif
+ err = skb_checksum_help(skb);
+ if (err)
+ goto err_kfree_skbs;
+ }
#else
-int vswitch_skb_checksum_setup(struct sk_buff *skb) { return 0; }
-#endif /* CONFIG_XEN && linux == 2.6.18 */
+ if (skb->ip_summed == CHECKSUM_HW) {
+ err = skb_checksum_help(skb, 0);
+ if (err)
+ goto err_kfree_skbs;
+ }
+#endif
+
+ err = skb_cow(skb, sizeof *header);
+ if (err)
+ goto err_kfree_skbs;
+
+ header = (struct odp_msg*)__skb_push(skb, sizeof *header);
+ header->type = queue_no;
+ header->length = skb->len;
+ header->port = port_no;
+ header->reserved = 0;
+ header->arg = arg;
+ skb_queue_tail(queue, skb);
+
+ skb = nskb;
+ } while (skb);
+ return 0;
+
+err_kfree_skbs:
+ kfree_skb(skb);
+ while ((skb = nskb) != NULL) {
+ nskb = skb->next;
+ kfree_skb(skb);
+ }
+ return err;
+}
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
{
struct dp_stats_percpu *stats;
struct sk_buff_head *queue;
- int port_no;
int err;
WARN_ON_ONCE(skb_shared(skb));
- BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR);
-
+ BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR
+        && queue_no != _ODPL_SFLOW_NR);
queue = &dp->queues[queue_no];
err = -ENOBUFS;
if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
goto err_kfree_skb;
- /* If a checksum-deferred packet is forwarded to the controller,
- * correct the pointers and checksum. This happens on a regular basis
- * only on Xen, on which VMs can pass up packets that do not have their
- * checksum computed.
- */
- err = vswitch_skb_checksum_setup(skb);
- if (err)
- goto err_kfree_skb;
-#ifndef CHECKSUM_HW
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- /* Until 2.6.22, the start of the transport header was also the
- * start of data to be checksummed. Linux 2.6.22 introduced
- * the csum_start field for this purpose, but we should point
- * the transport header to it anyway for backward
- * compatibility, as dev_queue_xmit() does even in 2.6.28. */
- skb_set_transport_header(skb, skb->csum_start -
- skb_headroom(skb));
-#endif
- err = skb_checksum_help(skb);
- if (err)
- goto err_kfree_skb;
- }
-#else
- if (skb->ip_summed == CHECKSUM_HW) {
- err = skb_checksum_help(skb, 0);
- if (err)
- goto err_kfree_skb;
- }
-#endif
+ forward_ip_summed(skb);
/* Break apart GSO packets into their component pieces. Otherwise
* userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
}
}
- /* Figure out port number. */
- port_no = ODPP_LOCAL;
- if (skb->dev) {
- if (skb->dev->br_port)
- port_no = skb->dev->br_port->port_no;
- else if (is_dp_dev(skb->dev))
- port_no = dp_dev_priv(skb->dev)->port_no;
- }
-
- /* Append each packet to queue. There will be only one packet unless
- * we broke up a GSO packet above. */
- do {
- struct odp_msg *header;
- struct sk_buff *nskb = skb->next;
- skb->next = NULL;
-
- err = skb_cow(skb, sizeof *header);
- if (err) {
- while (nskb) {
- kfree_skb(skb);
- skb = nskb;
- nskb = skb->next;
- }
- goto err_kfree_skb;
- }
-
- header = (struct odp_msg*)__skb_push(skb, sizeof *header);
- header->type = queue_no;
- header->length = skb->len;
- header->port = port_no;
- header->reserved = 0;
- header->arg = arg;
- skb_queue_tail(queue, skb);
-
- skb = nskb;
- } while (skb);
-
+ err = queue_control_packets(skb, queue, queue_no, arg);
wake_up_interruptible(&dp->waitqueue);
- return 0;
+ return err;
err_kfree_skb:
kfree_skb(skb);
break;
case ODPAT_SET_VLAN_PCP:
- if (a->vlan_pcp.vlan_pcp & ~VLAN_PCP_MASK)
+ if (a->vlan_pcp.vlan_pcp
+ & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
return -EINVAL;
break;
return 0;
}
-static int answer_query(struct sw_flow *flow, struct odp_flow __user *ufp)
+static int answer_query(struct sw_flow *flow, u32 query_flags,
+ struct odp_flow __user *ufp)
{
struct odp_flow_stats stats;
unsigned long int flags;
spin_lock_irqsave(&flow->lock, flags);
get_stats(flow, &stats);
+
+ if (query_flags & ODPFF_ZERO_TCP_FLAGS)
+ flow->tcp_flags = 0;
spin_unlock_irqrestore(&flow->lock, flags);
if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
* we get completely accurate stats, but that blows our performance,
* badly. */
dp->n_flows--;
- error = answer_query(flow, ufp);
+ error = answer_query(flow, 0, ufp);
flow_deferred_free(flow);
error:
if (!flow)
error = __put_user(ENOENT, &ufp->stats.error);
else
- error = answer_query(flow, ufp);
+ error = answer_query(flow, uf.flags, ufp);
if (error)
return -EFAULT;
}
if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
return -EFAULT;
- error = answer_query(flow, ufp);
+ error = answer_query(flow, 0, ufp);
if (error)
return error;
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
- /* Normally, setting the skb 'protocol' field would be handled by a
- * call to eth_type_trans(), but it assumes there's a sending
- * device, which we may not have. */
+ /* Normally, setting the skb 'protocol' field would be handled by a
+ * call to eth_type_trans(), but it assumes there's a sending
+ * device, which we may not have. */
if (ntohs(eth->h_proto) >= 1536)
skb->protocol = eth->h_proto;
else
return mtu ? mtu : ETH_DATA_LEN;
}
+/* Sets the MTU of all datapath devices to the minimum MTU of all of the
+ * attached ports. 'dev' is the device whose MTU may have changed. Must be
+ * called with RTNL lock and dp_mutex. */
+void set_dp_devs_mtu(const struct datapath *dp, struct net_device *dev)
+{
+ struct net_bridge_port *p;
+ int mtu;
+
+ ASSERT_RTNL();
+
+ if (is_dp_dev(dev))
+ return;
+
+ mtu = dp_min_mtu(dp);
+
+ list_for_each_entry_rcu (p, &dp->port_list, node) {
+ struct net_device *br_dev = p->dev;
+
+ if (is_dp_dev(br_dev))
+ dev_set_mtu(br_dev, mtu);
+ }
+}
+
static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{
return 0;
}
+static int get_listen_mask(const struct file *f)
+{
+ return (long)f->private_data;
+}
+
+static void set_listen_mask(struct file *f, int listen_mask)
+{
+ f->private_data = (void*)(long)listen_mask;
+}
+
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
unsigned long argp)
{
int dp_idx = iminor(f->f_dentry->d_inode);
struct datapath *dp;
int drop_frags, listeners, port_no;
+ unsigned int sflow_probability;
int err;
/* Handle commands with special locking requirements up front. */
break;
case ODP_GET_LISTEN_MASK:
- err = put_user((int)f->private_data, (int __user *)argp);
+ err = put_user(get_listen_mask(f), (int __user *)argp);
break;
case ODP_SET_LISTEN_MASK:
if (listeners & ~ODPL_ALL)
break;
err = 0;
- f->private_data = (void*)listeners;
+ set_listen_mask(f, listeners);
+ break;
+
+ case ODP_GET_SFLOW_PROBABILITY:
+ err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
+ break;
+
+ case ODP_SET_SFLOW_PROBABILITY:
+ err = get_user(sflow_probability, (unsigned int __user *)argp);
+ if (!err)
+ dp->sflow_probability = sflow_probability;
break;
case ODP_PORT_QUERY:
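As a usage illustration for the two sFlow ioctls added above, a userspace caller might look roughly like the following sketch. The /dev/net/dp0 device path, the helper name, and the error handling are assumptions made for illustration; ODP_SET_SFLOW_PROBABILITY is taken from the datapath's userspace protocol header, and the probability is taken to be a fraction of UINT_MAX (0 disables sampling, UINT_MAX samples every packet):

    #include <fcntl.h>
    #include <limits.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "openvswitch/datapath-protocol.h" /* ODP_*_SFLOW_PROBABILITY */

    /* Sketch: sample roughly 1 packet in 1000 on datapath 0. */
    static int set_sflow_probability_example(void)
    {
        unsigned int probability = UINT_MAX / 1000;
        int fd = open("/dev/net/dp0", O_RDWR); /* hypothetical device node */
        int err;

        if (fd < 0)
            return -1;
        err = ioctl(fd, ODP_SET_SFLOW_PROBABILITY, &probability);
        close(fd);
        return err;
    }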
loff_t *ppos)
{
/* XXX is there sufficient synchronization here? */
- int listeners = (int) f->private_data;
+ int listeners = get_listen_mask(f);
int dp_idx = iminor(f->f_dentry->d_inode);
struct datapath *dp = get_dp(dp_idx);
struct sk_buff *skb;
}
}
success:
- copy_bytes = min(skb->len, nbytes);
+ copy_bytes = min_t(size_t, skb->len, nbytes);
iov.iov_base = buf;
iov.iov_len = copy_bytes;
retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
if (dp) {
mask = 0;
poll_wait(file, &dp->waitqueue, wait);
- if (dp_has_packet_of_interest(dp, (int)file->private_data))
+ if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
mask |= POLLIN | POLLRDNORM;
} else {
mask = POLLIN | POLLRDNORM | POLLHUP;
};
static int major;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
static struct llc_sap *dp_stp_sap;
static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev,
return 0;
}
-static int __init dp_init(void)
+static int dp_avoid_bridge_init(void)
{
- int err;
-
- printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
-
/* Register to receive STP packets because the bridge module also
* attempts to do so. Since there can only be a single listener for a
* given protocol, this provides mutual exclusion against the bridge
printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n");
return -EADDRINUSE;
}
+ return 0;
+}
+
+static void dp_avoid_bridge_exit(void)
+{
+ llc_sap_put(dp_stp_sap);
+}
+#else /* Linux 2.6.27 or later. */
+static int dp_avoid_bridge_init(void)
+{
+ /* Linux 2.6.27 introduces a way for multiple clients to register for
+ * STP packets, which interferes with what we try to do above.
+ * Instead, just check whether there's a bridge hook defined. This is
+ * not as safe--the bridge module is willing to load over the top of
+ * us--but it provides a little bit of protection. */
+ if (br_handle_frame_hook) {
+ printk(KERN_ERR "openvswitch: bridge module is loaded, cannot load over it\n");
+ return -EADDRINUSE;
+ }
+ return 0;
+}
+
+static void dp_avoid_bridge_exit(void)
+{
+ /* Nothing to do. */
+}
+#endif /* Linux 2.6.27 or later */
+
+static int __init dp_init(void)
+{
+ int err;
+
+ printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
+
+ err = dp_avoid_bridge_init();
+ if (err)
+ return err;
err = flow_init();
if (err)
unregister_netdevice_notifier(&dp_device_notifier);
flow_exit();
br_handle_frame_hook = NULL;
- llc_sap_put(dp_stp_sap);
+ dp_avoid_bridge_exit();
}
module_init(dp_init);