#include <asm/system.h>
#include <asm/div64.h>
#include <asm/bug.h>
+#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
vport_lock();
+ /* Rename: the caller-locked variants __vport_add()/__vport_del() become
+  * plain vport_add()/vport_del(); the old user-facing vport_add() is
+  * renamed vport_user_add() elsewhere in this patch.  The caller still
+  * holds vport_lock across the call, exactly as before. */
if (odp_port->flags & ODP_PORT_INTERNAL)
- vport = __vport_add(odp_port->devname, "internal", NULL);
+ vport = vport_add(odp_port->devname, "internal", NULL);
else
- vport = __vport_add(odp_port->devname, "netdev", NULL);
+ vport = vport_add(odp_port->devname, "netdev", NULL);
vport_unlock();
if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) {
vport_lock();
- __vport_del(vport);
+ vport_del(vport);
vport_unlock();
}
}
return err;
}
-/* Must be called with rcu_read_lock and with bottom-halves disabled. */
+/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
+ /* Byte offset (into struct dp_stats_percpu) of the counter to bump at
+  * the single exit point below; replaces the old inline stats->n_*++
+  * updates so the per-CPU counter is touched once, under
+  * local_bh_disable(), instead of requiring BHs off for the whole
+  * function. */
+ int stats_counter_off;
struct odp_flow_key key;
struct tbl_node *flow_node;
OVS_CB(skb)->dp_port = p;
- /* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
- stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
-
if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
if (dp->drop_frags) {
kfree_skb(skb);
- stats->n_frags++;
- return;
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
+ goto out;
}
}
+ /* NOTE(review): the hunk context that defines "flow"/"acts" (the
+  * flow-table lookup opening this if/else) is elided from this excerpt. */
flow_used(flow, skb);
execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
GFP_ATOMIC);
- stats->n_hit++;
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
} else {
- stats->n_missed++;
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
}
+
+out:
+ local_bh_disable();
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+ /* Non-atomic u64 increment: BH-disable serializes against softirq
+  * writers on this CPU; presumably torn reads elsewhere are acceptable
+  * for statistics — TODO confirm. */
+ (*(u64 *)((u8 *)stats + stats_counter_off))++;
+ local_bh_enable();
}
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
nskb = skb->next;
skb->next = NULL;
- /* If a checksum-deferred packet is forwarded to the
- * controller, correct the pointers and checksum.
- */
- err = vswitch_skb_checksum_setup(skb);
- if (err)
- goto err_kfree_skbs;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- /* Until 2.6.22, the start of the transport header was
- * also the start of data to be checksummed. Linux
- * 2.6.22 introduced the csum_start field for this
- * purpose, but we should point the transport header to
- * it anyway for backward compatibility, as
- * dev_queue_xmit() does even in 2.6.28. */
- skb_set_transport_header(skb, skb->csum_start -
- skb_headroom(skb));
-#endif
-
- err = skb_checksum_help(skb);
- if (err)
- goto err_kfree_skbs;
- }
-
err = skb_cow(skb, sizeof *header);
if (err)
goto err_kfree_skbs;
forward_ip_summed(skb);
+ /* Checksumming is now deferred: the packet keeps CHECKSUM_PARTIAL here
+  * and openvswitch_read() computes the checksum while copying to
+  * userspace, instead of calling skb_checksum_help() at queue time. */
+ err = vswitch_skb_checksum_setup(skb);
+ if (err)
+ goto err_kfree_skb;
+ /* NOTE(review): surrounding context jumps to "err_kfree_skbs" (plural)
+  * while this new path targets "err_kfree_skb" (singular) — verify both
+  * labels exist in this function in the final file, else this will not
+  * compile. */
+
/* Break apart GSO packets into their component pieces. Otherwise
* userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
if (skb_is_gso(skb)) {
+ /* Advertise SG + HW_CSUM so skb_gso_segment() leaves the segments with
+  * CHECKSUM_PARTIAL rather than software-checksumming each one. */
- struct sk_buff *nskb = skb_gso_segment(skb, 0);
+ struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
if (nskb) {
kfree_skb(skb);
skb = nskb;
err_kfree_skb:
kfree_skb(skb);
err:
+ /* Lost-packet accounting: preempt-disable via get_cpu()/put_cpu() is
+  * replaced by BH-disable, matching the other per-CPU stats updates in
+  * this patch. */
- stats = percpu_ptr(dp->stats_percpu, get_cpu());
+ local_bh_disable();
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
stats->n_lost++;
- put_cpu();
+ local_bh_enable();
return err;
}
skb->protocol = htons(ETH_P_802_2);
flow_extract(skb, execute->in_port, &key);
+
+ /* execute_actions() dereferences RCU-protected datapath state, so the
+  * userspace ODP_EXECUTE path must hold the RCU read lock — presumably
+  * it previously relied on no such protection; confirm against the
+  * locking contract of the receive path. */
+ rcu_read_lock();
err = execute_actions(dp, skb, &key, actions->actions,
actions->n_actions, GFP_KERNEL);
+ rcu_read_unlock();
+
kfree(actions);
return err;
stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
for_each_possible_cpu(i) {
const struct dp_stats_percpu *s;
- s = percpu_ptr(dp->stats_percpu, i);
+ s = per_cpu_ptr(dp->stats_percpu, i);
stats.n_frags += s->n_frags;
stats.n_hit += s->n_hit;
stats.n_missed += s->n_missed;
if (copy_from_user(&pg, upg, sizeof pg))
return -EFAULT;
- return do_get_port_group(dp, pg.ports, pg.n_ports, pg.group, &pg.n_ports);
+ /* Fix: report the result count through the user-space struct (upg);
+  * the old code wrote it into the local kernel copy "pg", whose value
+  * was then discarded on return. */
+ return do_get_port_group(dp, pg.ports, pg.n_ports, pg.group, &upg->n_ports);
}
static int get_listen_mask(const struct file *f)
goto exit;
+ /* User-facing vport ioctl helpers are renamed vport_* -> vport_user_*,
+  * freeing the short names for the in-kernel (caller-locked) API; the
+  * new ODP_VPORT_STATS_SET operation is also dispatched here. */
case ODP_VPORT_ADD:
- err = vport_add((struct odp_vport_add __user *)argp);
+ err = vport_user_add((struct odp_vport_add __user *)argp);
goto exit;
case ODP_VPORT_MOD:
- err = vport_mod((struct odp_vport_mod __user *)argp);
+ err = vport_user_mod((struct odp_vport_mod __user *)argp);
goto exit;
case ODP_VPORT_DEL:
- err = vport_del((char __user *)argp);
+ err = vport_user_del((char __user *)argp);
goto exit;
case ODP_VPORT_STATS_GET:
- err = vport_stats_get((struct odp_vport_stats_req __user *)argp);
+ err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
+ goto exit;
+
+ case ODP_VPORT_STATS_SET:
+ err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
goto exit;
case ODP_VPORT_ETHER_GET:
- err = vport_ether_get((struct odp_vport_ether __user *)argp);
+ err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
goto exit;
case ODP_VPORT_ETHER_SET:
- err = vport_ether_set((struct odp_vport_ether __user *)argp);
+ err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
goto exit;
case ODP_VPORT_MTU_GET:
- err = vport_mtu_get((struct odp_vport_mtu __user *)argp);
+ err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
goto exit;
case ODP_VPORT_MTU_SET:
- err = vport_mtu_set((struct odp_vport_mtu __user *)argp);
+ err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
goto exit;
}
return -EFAULT;
return do_get_port_group(dp, compat_ptr(pg.ports), pg.n_ports,
+ /* Same fix as the native ioctl path: return the count via the user
+  * struct rather than the discarded kernel copy. */
- pg.group, &pg.n_ports);
+ pg.group, &upg->n_ports);
}
static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
case ODP_VPORT_MTU_GET:
case ODP_VPORT_ETHER_SET:
case ODP_VPORT_ETHER_GET:
+ /* STATS_SET joins the commands forwarded unchanged to the native ioctl
+  * handler — presumably its struct layout is compat-safe (no pointers or
+  * long fields); confirm against struct odp_vport_stats_req. */
+ case ODP_VPORT_STATS_SET:
case ODP_VPORT_STATS_GET:
case ODP_DP_STATS:
case ODP_GET_DROP_FRAGS:
return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
case ODP_VPORT_ADD32:
- return compat_vport_add(compat_ptr(argp));
+ return compat_vport_user_add(compat_ptr(argp));
case ODP_VPORT_MOD32:
- return compat_vport_mod(compat_ptr(argp));
+ return compat_vport_user_mod(compat_ptr(argp));
}
dp = get_dp_locked(dp_idx);
}
#endif
+/* Unfortunately this function is not exported so this is a verbatim copy
+ * from net/core/datagram.c in 2.6.30.
+ *
+ * Copies @len bytes of @skb's payload, starting at byte @offset, to the
+ * userspace buffer @to, folding a checksum of the copied data into
+ * *@csump (the caller seeds the initial value).  Handles the linear
+ * header, paged fragments (kmap/kunmap per page — hence the new
+ * <linux/highmem.h> include), and recurses over the frag_list chain.
+ * Returns 0 on success or -EFAULT if any userspace write faults.
+ * Deliberately kept byte-for-byte identical to upstream so it stays
+ * diffable against net/core/datagram.c — do not restyle. */
+static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
+ u8 __user *to, int len,
+ __wsum *csump)
+{
+ int start = skb_headlen(skb);
+ int pos = 0;
+ int i, copy = start - offset;
+
+ /* Copy header. */
+ if (copy > 0) {
+ int err = 0;
+ if (copy > len)
+ copy = len;
+ *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
+ *csump, &err);
+ if (err)
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ to += copy;
+ pos = copy;
+ }
+
+ /* Copy paged fragments; each partial checksum is folded in at the
+  * correct byte position via csum_block_add(). */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
+ if ((copy = end - offset) > 0) {
+ __wsum csum2;
+ int err = 0;
+ u8 *vaddr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = frag->page;
+
+ if (copy > len)
+ copy = len;
+ vaddr = kmap(page);
+ csum2 = csum_and_copy_to_user(vaddr +
+ frag->page_offset +
+ offset - start,
+ to, copy, 0, &err);
+ kunmap(page);
+ if (err)
+ goto fault;
+ *csump = csum_block_add(*csump, csum2, pos);
+ if (!(len -= copy))
+ return 0;
+ offset += copy;
+ to += copy;
+ pos += copy;
+ }
+ start = end;
+ }
+
+ /* Recurse into chained skbs on the frag_list. */
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+ for (; list; list=list->next) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + list->len;
+ if ((copy = end - offset) > 0) {
+ __wsum csum2 = 0;
+ if (copy > len)
+ copy = len;
+ if (skb_copy_and_csum_datagram(list,
+ offset - start,
+ to, copy,
+ &csum2))
+ goto fault;
+ *csump = csum_block_add(*csump, csum2, pos);
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ to += copy;
+ pos += copy;
+ }
+ start = end;
+ }
+ }
+ if (!len)
+ return 0;
+
+fault:
+ return -EFAULT;
+}
+
ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
loff_t *ppos)
{
int dp_idx = iminor(f->f_dentry->d_inode);
struct datapath *dp = get_dp(dp_idx);
struct sk_buff *skb;
- struct iovec __user iov;
- size_t copy_bytes;
+ /* copy_bytes may be shrunk below to just the header region once the
+  * checksummed tail has been copied; tot_copy_bytes keeps the full
+  * count to report to the caller. */
+ size_t copy_bytes, tot_copy_bytes;
int retval;
if (!dp)
}
}
success:
- copy_bytes = min_t(size_t, skb->len, nbytes);
- iov.iov_base = buf;
- iov.iov_len = copy_bytes;
- retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
+ copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
+
+ retval = 0;
+ /* Deferred checksum: a CHECKSUM_PARTIAL skb was queued without its
+  * checksum filled in.  If userspace reads the whole packet, compute
+  * the checksum while copying and write the folded value at
+  * csum_start + csum_offset in the user buffer; for a truncated read,
+  * fall back to checksumming in the kernel first. */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (copy_bytes == skb->len) {
+ __wsum csum = 0;
+ int csum_start, csum_offset;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ /* Until 2.6.22, the start of the transport header was
+ * also the start of data to be checksummed. Linux
+ * 2.6.22 introduced the csum_start field for this
+ * purpose, but we should point the transport header to
+ * it anyway for backward compatibility, as
+ * dev_queue_xmit() does even in 2.6.28. */
+ skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
+ csum_offset = skb->csum_offset;
+#else
+ csum_offset = skb->csum;
+#endif
+ csum_start = skb_transport_header(skb) - skb->data;
+ retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
+ copy_bytes - csum_start, &csum);
+ if (!retval) {
+ __sum16 __user *csump;
+
+ /* The checksummed region was copied above; only the bytes
+  * before csum_start remain for skb_copy_datagram_iovec(). */
+ copy_bytes = csum_start;
+ csump = (__sum16 __user *)(buf + csum_start + csum_offset);
+ /* NOTE(review): put_user()'s return value is discarded — a
+  * fault writing the folded checksum would go unreported to
+  * the caller; confirm this is intentional. */
+ put_user(csum_fold(csum), csump);
+ }
+ } else
+ retval = skb_checksum_help(skb);
+ }
+
+ if (!retval) {
+ struct iovec __user iov;
+
+ iov.iov_base = buf;
+ iov.iov_len = copy_bytes;
+ retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
+ }
+
if (!retval)
+ /* Report the full byte count copied, not just the header portion. */
- retval = copy_bytes;
+ retval = tot_copy_bytes;
+
kfree_skb(skb);
error: