/* Functions for managing the dp interface/device. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
-#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
-#include <linux/workqueue.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <linux/compat.h>
#include "datapath.h"
#include "actions.h"
#include "flow.h"
+#include "loop_counter.h"
#include "odp-compat.h"
#include "table.h"
#include "vport-internal_dev.h"
#include "compat.h"
-
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
* dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
* lock first.
*
- * It is safe to access the datapath and dp_port structures with just
+ * It is safe to access the datapath and vport structures with just
* dp_mutex.
*/
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
-/* Number of milliseconds between runs of the maintenance thread. */
-#define MAINT_SLEEP_MSECS 1000
-
-static int new_dp_port(struct datapath *, struct odp_port *, int port_no);
+static int new_vport(struct datapath *, struct odp_port *, int port_no);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
- return vport_get_name(dp->ports[ODPP_LOCAL]->vport);
+ return vport_get_name(dp->ports[ODPP_LOCAL]);
}
static inline size_t br_nlmsg_size(void)
}
static int dp_fill_ifinfo(struct sk_buff *skb,
- const struct dp_port *port,
+ const struct vport *port,
int event, unsigned int flags)
{
const struct datapath *dp = port->dp;
- int ifindex = vport_get_ifindex(port->vport);
- int iflink = vport_get_iflink(port->vport);
+ int ifindex = vport_get_ifindex(port);
+ int iflink = vport_get_iflink(port);
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
hdr->__ifi_pad = 0;
hdr->ifi_type = ARPHRD_ETHER;
hdr->ifi_index = ifindex;
- hdr->ifi_flags = vport_get_flags(port->vport);
+ hdr->ifi_flags = vport_get_flags(port);
hdr->ifi_change = 0;
- NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port->vport));
- NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[ODPP_LOCAL]->vport));
- NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port->vport));
+ NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
+ NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[ODPP_LOCAL]));
+ NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
NLA_PUT_U8(skb, IFLA_OPERSTATE,
- vport_is_running(port->vport)
- ? vport_get_operstate(port->vport)
+ vport_is_running(port)
+ ? vport_get_operstate(port)
: IF_OPER_DOWN);
#endif
- NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN,
- vport_get_addr(port->vport));
+ NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
if (ifindex != iflink)
NLA_PUT_U32(skb, IFLA_LINK, iflink);
return -EMSGSIZE;
}
-static void dp_ifinfo_notify(int event, struct dp_port *port)
+static void dp_ifinfo_notify(int event, struct vport *port)
{
struct sk_buff *skb;
int err = -ENOBUFS;
/* Set up our datapath device. */
BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
strcpy(internal_dev_port.devname, devname);
- internal_dev_port.flags = ODP_PORT_INTERNAL;
- err = new_dp_port(dp, &internal_dev_port, ODPP_LOCAL);
+ strcpy(internal_dev_port.type, "internal");
+ err = new_vport(dp, &internal_dev_port, ODPP_LOCAL);
if (err) {
if (err == -EBUSY)
err = -EEXIST;
return 0;
err_destroy_local_port:
- dp_detach_port(dp->ports[ODPP_LOCAL], 1);
+ dp_detach_port(dp->ports[ODPP_LOCAL]);
err_destroy_table:
tbl_destroy(dp->table, NULL);
err_free_dp:
static void do_destroy_dp(struct datapath *dp)
{
- struct dp_port *p, *n;
+ struct vport *p, *n;
int i;
list_for_each_entry_safe (p, n, &dp->port_list, node)
if (p->port_no != ODPP_LOCAL)
- dp_detach_port(p, 1);
+ dp_detach_port(p);
dp_sysfs_del_dp(dp);
rcu_assign_pointer(dps[dp->dp_idx], NULL);
- dp_detach_port(dp->ports[ODPP_LOCAL], 1);
+ dp_detach_port(dp->ports[ODPP_LOCAL]);
tbl_destroy(dp->table, flow_free_tbl);
for (i = 0; i < DP_N_QUEUES; i++)
skb_queue_purge(&dp->queues[i]);
- for (i = 0; i < DP_MAX_GROUPS; i++)
- kfree(dp->groups[i]);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
module_put(THIS_MODULE);
return err;
}
-static void release_dp_port(struct kobject *kobj)
-{
- struct dp_port *p = container_of(kobj, struct dp_port, kobj);
- kfree(p);
-}
-
-static struct kobj_type brport_ktype = {
-#ifdef CONFIG_SYSFS
- .sysfs_ops = &brport_sysfs_ops,
-#endif
- .release = release_dp_port
-};
-
/* Called with RTNL lock and dp_mutex. */
-static int new_dp_port(struct datapath *dp, struct odp_port *odp_port, int port_no)
+static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no)
{
+ struct vport_parms parms;
struct vport *vport;
- struct dp_port *p;
- int err;
- vport = vport_locate(odp_port->devname);
- if (!vport) {
- vport_lock();
-
- if (odp_port->flags & ODP_PORT_INTERNAL)
- vport = vport_add(odp_port->devname, "internal", NULL);
- else
- vport = vport_add(odp_port->devname, "netdev", NULL);
-
- vport_unlock();
+ parms.name = odp_port->devname;
+ parms.type = odp_port->type;
+ parms.config = odp_port->config;
+ parms.dp = dp;
+ parms.port_no = port_no;
- if (IS_ERR(vport))
- return PTR_ERR(vport);
- }
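+ /* vport_add() modifies the global vport table, so it must run under the vport lock. */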
+ vport_lock();
+ vport = vport_add(&parms);
+ vport_unlock();
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
- p->port_no = port_no;
- p->dp = dp;
- atomic_set(&p->sflow_pool, 0);
-
- err = vport_attach(vport, p);
- if (err) {
- kfree(p);
- return err;
- }
-
- rcu_assign_pointer(dp->ports[port_no], p);
- list_add_rcu(&p->node, &dp->port_list);
+ rcu_assign_pointer(dp->ports[port_no], vport);
+ list_add_rcu(&vport->node, &dp->port_list);
dp->n_ports++;
- /* Initialize kobject for bridge. This will be added as
- * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
- p->kobj.kset = NULL;
- kobject_init(&p->kobj, &brport_ktype);
-
- dp_ifinfo_notify(RTM_NEWLINK, p);
+ dp_ifinfo_notify(RTM_NEWLINK, vport);
return 0;
}
if (copy_from_user(&port, portp, sizeof port))
goto out;
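+ /* Null-terminate the strings copied in from userspace. */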
port.devname[IFNAMSIZ - 1] = '\0';
+ port.type[VPORT_TYPE_SIZE - 1] = '\0';
rtnl_lock();
dp = get_dp_locked(dp_idx);
goto out_unlock_dp;
got_port_no:
- err = new_dp_port(dp, &port, port_no);
+ err = new_vport(dp, &port, port_no);
if (err)
goto out_unlock_dp;
return err;
}
-int dp_detach_port(struct dp_port *p, int may_delete)
+int dp_detach_port(struct vport *p)
{
- struct vport *vport = p->vport;
int err;
ASSERT_RTNL();
list_del_rcu(&p->node);
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
- err = vport_detach(vport);
- if (err)
- return err;
-
- /* Then wait until no one is still using it, and destroy it. */
- synchronize_rcu();
-
- if (may_delete) {
- const char *port_type = vport_get_type(vport);
-
- if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) {
- vport_lock();
- vport_del(vport);
- vport_unlock();
- }
- }
+ /* Then destroy it. */
+ vport_lock();
+ err = vport_del(p);
+ vport_unlock();
- kobject_put(&p->kobj);
-
- return 0;
+ return err;
}
static int detach_port(int dp_idx, int port_no)
{
- struct dp_port *p;
+ struct vport *p;
struct datapath *dp;
int err;
if (!p)
goto out_unlock_dp;
- err = dp_detach_port(p, 1);
+ err = dp_detach_port(p);
out_unlock_dp:
mutex_unlock(&dp->mutex);
}
/* Must be called with rcu_read_lock. */
-void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
+void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
int stats_counter_off;
- struct odp_flow_key key;
- struct tbl_node *flow_node;
+ struct sw_flow_actions *acts;
+ struct loop_counter *loop;
+ int error;
- WARN_ON_ONCE(skb_shared(skb));
- skb_warn_if_lro(skb);
+ OVS_CB(skb)->vport = p;
+
+ if (!OVS_CB(skb)->flow) {
+ struct odp_flow_key key;
+ struct tbl_node *flow_node;
+ bool is_frag;
- OVS_CB(skb)->dp_port = p;
+ /* Extract flow from 'skb' into 'key'. */
+ error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
+ if (unlikely(error)) {
+ kfree_skb(skb);
+ return;
+ }
- if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
- if (dp->drop_frags) {
+ if (is_frag && dp->drop_frags) {
kfree_skb(skb);
stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
goto out;
}
+
+ /* Look up flow. */
+ flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
+ flow_hash(&key), flow_cmp);
+ if (unlikely(!flow_node)) {
+ dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+ goto out;
+ }
+
+ OVS_CB(skb)->flow = flow_cast(flow_node);
}
- flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp);
- if (flow_node) {
- struct sw_flow *flow = flow_cast(flow_node);
- struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
- flow_used(flow, skb);
- execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
- GFP_ATOMIC);
- stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
- } else {
- stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
- dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+ flow_used(OVS_CB(skb)->flow, skb);
+
+ acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+
+ /* Check whether we've looped too much. */
+ loop = loop_get_counter();
+ if (unlikely(++loop->count > MAX_LOOPS))
+ loop->looping = true;
+ if (unlikely(loop->looping)) {
+ loop_suppress(dp, acts);
+ goto out_loop;
}
+ /* Execute actions. */
+ execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
+ acts->n_actions);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
+
+ /* Check whether sub-actions looped too much. */
+ if (unlikely(loop->looping))
+ loop_suppress(dp, acts);
+
+out_loop:
+ /* Decrement loop counter. */
+ if (!--loop->count)
+ loop->looping = false;
+ loop_put_counter();
+
out:
+ /* Update datapath statistics. */
local_bh_disable();
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
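+ /* Writers bump the seqcount so stats readers can detect a torn 64-bit counter update. */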
+ write_seqcount_begin(&stats->seqlock);
(*(u64 *)((u8 *)stats + stats_counter_off))++;
+ write_seqcount_end(&stats->seqlock);
+
local_bh_enable();
}
break;
default:
if (net_ratelimit())
- printk(KERN_ERR "Attempting to checksum a non-"
- "TCP/UDP packet, dropping a protocol"
- " %d packet", iph->protocol);
+ pr_err("Attempting to checksum a non-TCP/UDP packet, "
+ "dropping a protocol %d packet",
+ iph->protocol);
goto out;
}
* be reverified). If we receive a packet with CHECKSUM_HW that really means
* CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there
* shouldn't be any devices that do this with bridging. */
-void
-compute_ip_summed(struct sk_buff *skb, bool xmit)
+void compute_ip_summed(struct sk_buff *skb, bool xmit)
{
/* For our convenience these defines change repeatedly between kernel
* versions, so we can't just copy them over... */
break;
#endif
default:
- printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
- skb->ip_summed);
+ pr_err("unknown checksum type %d\n", skb->ip_summed);
/* None seems the safest... */
OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
- }
+ }
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* Xen has a special way of representing CHECKSUM_PARTIAL on older
* is slightly different because we are only concerned with bridging and not
* other types of forwarding and can get away with slightly more optimal
* behavior. */
-void
-forward_ip_summed(struct sk_buff *skb)
+void forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
/* Append each packet in 'skb' list to 'queue'. There will be only one packet
* unless we broke up a GSO packet. */
-static int
-queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
- int queue_no, u32 arg)
+static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
+ int queue_no, u32 arg)
{
struct sk_buff *nskb;
int port_no;
int err;
- if (OVS_CB(skb)->dp_port)
- port_no = OVS_CB(skb)->dp_port->port_no;
+ if (OVS_CB(skb)->vport)
+ port_no = OVS_CB(skb)->vport->port_no;
else
port_no = ODPP_LOCAL;
return err;
}
-int
-dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
- u32 arg)
+int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
+ u32 arg)
{
struct dp_stats_percpu *stats;
struct sk_buff_head *queue;
err:
local_bh_disable();
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+ write_seqcount_begin(&stats->seqlock);
stats->n_lost++;
+ write_seqcount_end(&stats->seqlock);
+
local_bh_enable();
return err;
for (i = 0; i < actions->n_actions; i++) {
const union odp_action *a = &actions->actions[i];
- switch (a->type) {
- case ODPAT_OUTPUT:
- if (a->output.port >= DP_MAX_PORTS)
- return -EINVAL;
- break;
- case ODPAT_OUTPUT_GROUP:
- if (a->output_group.group >= DP_MAX_GROUPS)
- return -EINVAL;
+ switch (a->type) {
+ case ODPAT_CONTROLLER:
+ case ODPAT_STRIP_VLAN:
+ case ODPAT_SET_DL_SRC:
+ case ODPAT_SET_DL_DST:
+ case ODPAT_SET_NW_SRC:
+ case ODPAT_SET_NW_DST:
+ case ODPAT_SET_TP_SRC:
+ case ODPAT_SET_TP_DST:
+ case ODPAT_SET_TUNNEL:
+ case ODPAT_SET_PRIORITY:
+ case ODPAT_POP_PRIORITY:
+ case ODPAT_DROP_SPOOFED_ARP:
+ /* No validation needed. */
break;
- case ODPAT_SET_VLAN_VID:
- if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
+ case ODPAT_OUTPUT:
+ if (a->output.port >= DP_MAX_PORTS)
return -EINVAL;
break;
- case ODPAT_SET_VLAN_PCP:
- if (a->vlan_pcp.vlan_pcp
- & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
+ case ODPAT_SET_DL_TCI:
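+ /* Userspace may set the VLAN ID and PCP but never the CFI bit. */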
+ if (a->dl_tci.tci & htons(VLAN_CFI_MASK))
return -EINVAL;
break;
break;
default:
- if (a->type >= ODPAT_N_ACTIONS)
- return -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
}
static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
- if (flow->used.tv_sec) {
- stats->used_sec = flow->used.tv_sec;
- stats->used_nsec = flow->used.tv_nsec;
+ if (flow->used) {
+ struct timespec offset_ts, used, now_mono;
+
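+ /* flow->used is a jiffies value; convert its age into a monotonic timespec. */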
+ ktime_get_ts(&now_mono);
+ jiffies_to_timespec(jiffies - flow->used, &offset_ts);
+ set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
+ now_mono.tv_nsec - offset_ts.tv_nsec);
+
+ stats->used_sec = used.tv_sec;
+ stats->used_nsec = used.tv_nsec;
} else {
stats->used_sec = 0;
stats->used_nsec = 0;
}
+
stats->n_packets = flow->packet_count;
stats->n_bytes = flow->byte_count;
- stats->ip_tos = flow->ip_tos;
+ stats->reserved = 0;
stats->tcp_flags = flow->tcp_flags;
stats->error = 0;
}
static void clear_stats(struct sw_flow *flow)
{
- flow->used.tv_sec = flow->used.tv_nsec = 0;
+ flow->used = 0;
flow->tcp_flags = 0;
- flow->ip_tos = 0;
flow->packet_count = 0;
flow->byte_count = 0;
}
struct tbl *table;
int error;
- memset(uf->flow.key.reserved, 0, sizeof uf->flow.key.reserved);
-
table = rcu_dereference(dp->table);
flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp);
if (!flow_node) {
}
/* Allocate flow. */
- error = -ENOMEM;
- flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
- if (flow == NULL)
+ flow = flow_alloc();
+ if (IS_ERR(flow)) {
+ error = PTR_ERR(flow);
goto error;
+ }
flow->key = uf->flow.key;
- spin_lock_init(&flow->lock);
clear_stats(flow);
/* Obtain actions. */
error_free_flow_acts:
kfree(flow->sf_acts);
error_free_flow:
- kmem_cache_free(flow_cache, flow);
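+ /* sf_acts is either unset or was freed above; clear it so flow_put() does not free it again. */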
+ flow->sf_acts = NULL;
+ flow_put(flow);
error:
return error;
}
if (get_user(actions, &ufp->actions))
return -EFAULT;
- return do_answer_query(flow, query_flags,
+ return do_answer_query(flow, query_flags,
&ufp->stats, actions, &ufp->n_actions);
}
struct tbl_node *flow_node;
int error;
- memset(key->reserved, 0, sizeof key->reserved);
flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
if (!flow_node)
return ERR_PTR(-ENOENT);
if (copy_from_user(&uf, ufp, sizeof uf))
return -EFAULT;
- memset(uf.key.reserved, 0, sizeof uf.key.reserved);
flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
if (!flow_node)
cbdata.uflows = flowvec->flows;
cbdata.n_flows = flowvec->n_flows;
cbdata.listed_flows = 0;
+
error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
return error ? error : cbdata.listed_flows;
}
struct sk_buff *skb;
struct sw_flow_actions *actions;
struct ethhdr *eth;
+ bool is_frag;
int err;
err = -EINVAL;
if (execute->length < ETH_HLEN || execute->length > 65535)
goto error;
- err = -ENOMEM;
actions = flow_actions_alloc(execute->n_actions);
- if (!actions)
+ if (IS_ERR(actions)) {
+ err = PTR_ERR(actions);
goto error;
+ }
err = -EFAULT;
if (copy_from_user(actions->actions, execute->actions,
if (!skb)
goto error_free_actions;
- if (execute->in_port < DP_MAX_PORTS)
- OVS_CB(skb)->dp_port = dp->ports[execute->in_port];
- else
- OVS_CB(skb)->dp_port = NULL;
-
err = -EFAULT;
if (copy_from_user(skb_put(skb, execute->length), execute->data,
execute->length))
else
skb->protocol = htons(ETH_P_802_2);
- flow_extract(skb, execute->in_port, &key);
+ err = flow_extract(skb, -1, &key, &is_frag);
+ if (err)
+ goto error_free_skb;
rcu_read_lock();
- err = execute_actions(dp, skb, &key, actions->actions,
- actions->n_actions, GFP_KERNEL);
+ err = execute_actions(dp, skb, &key, actions->actions, actions->n_actions);
rcu_read_unlock();
kfree(actions);
stats.max_capacity = TBL_MAX_BUCKETS;
stats.n_ports = dp->n_ports;
stats.max_ports = DP_MAX_PORTS;
- stats.max_groups = DP_MAX_GROUPS;
stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
for_each_possible_cpu(i) {
- const struct dp_stats_percpu *s;
- s = per_cpu_ptr(dp->stats_percpu, i);
- stats.n_frags += s->n_frags;
- stats.n_hit += s->n_hit;
- stats.n_missed += s->n_missed;
- stats.n_lost += s->n_lost;
+ const struct dp_stats_percpu *percpu_stats;
+ struct dp_stats_percpu local_stats;
+ unsigned seqcount;
+
+ percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
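+ /* Read a consistent snapshot of the per-CPU stats, retrying if a writer raced with us. */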
+ do {
+ seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+ local_stats = *percpu_stats;
+ } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+ stats.n_frags += local_stats.n_frags;
+ stats.n_hit += local_stats.n_hit;
+ stats.n_missed += local_stats.n_missed;
+ stats.n_lost += local_stats.n_lost;
}
stats.max_miss_queue = DP_MAX_QUEUE_LEN;
stats.max_action_queue = DP_MAX_QUEUE_LEN;
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
- struct dp_port *p;
+ struct vport *p;
int mtu = 0;
ASSERT_RTNL();
/* Skip any internal ports, since that's what we're trying to
* set. */
- if (is_internal_vport(p->vport))
+ if (is_internal_vport(p))
continue;
- dev_mtu = vport_get_mtu(p->vport);
+ dev_mtu = vport_get_mtu(p);
if (!mtu || dev_mtu < mtu)
mtu = dev_mtu;
}
* be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
- struct dp_port *p;
+ struct vport *p;
int mtu;
ASSERT_RTNL();
mtu = dp_min_mtu(dp);
list_for_each_entry_rcu (p, &dp->port_list, node) {
- if (is_internal_vport(p->vport))
- vport_set_mtu(p->vport, mtu);
+ if (is_internal_vport(p))
+ vport_set_mtu(p, mtu);
}
}
-static int
-put_port(const struct dp_port *p, struct odp_port __user *uop)
+static int put_port(const struct vport *p, struct odp_port __user *uop)
{
struct odp_port op;
memset(&op, 0, sizeof op);
rcu_read_lock();
- strncpy(op.devname, vport_get_name(p->vport), sizeof op.devname);
+ strncpy(op.devname, vport_get_name(p), sizeof op.devname);
+ strncpy(op.type, vport_get_type(p), sizeof op.type);
rcu_read_unlock();
op.port = p->port_no;
- op.flags = is_internal_vport(p->vport) ? ODP_PORT_INTERNAL : 0;
return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}
-static int
-query_port(struct datapath *dp, struct odp_port __user *uport)
+static int query_port(struct datapath *dp, struct odp_port __user *uport)
{
struct odp_port port;
if (port.devname[0]) {
struct vport *vport;
- struct dp_port *dp_port;
int err = 0;
port.devname[IFNAMSIZ - 1] = '\0';
err = -ENODEV;
goto error_unlock;
}
-
- dp_port = vport_get_dp_port(vport);
- if (!dp_port || dp_port->dp != dp) {
+ if (vport->dp != dp) {
err = -ENOENT;
goto error_unlock;
}
- port.port = dp_port->port_no;
+ port.port = vport->port_no;
error_unlock:
rcu_read_unlock();
return put_port(dp->ports[port.port], uport);
}
-static int
-do_list_ports(struct datapath *dp, struct odp_port __user *uports, int n_ports)
+static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
+ int n_ports)
{
int idx = 0;
if (n_ports) {
- struct dp_port *p;
+ struct vport *p;
list_for_each_entry_rcu (p, &dp->port_list, node) {
if (put_port(p, &uports[idx]))
return idx;
}
-static int
-list_ports(struct datapath *dp, struct odp_portvec __user *upv)
+static int list_ports(struct datapath *dp, struct odp_portvec __user *upv)
{
struct odp_portvec pv;
int retval;
return put_user(retval, &upv->n_ports);
}
-/* RCU callback for freeing a dp_port_group */
-static void free_port_group(struct rcu_head *rcu)
-{
- struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
- kfree(g);
-}
-
-static int
-do_set_port_group(struct datapath *dp, u16 __user *ports, int n_ports, int group)
-{
- struct dp_port_group *new_group, *old_group;
- int error;
-
- error = -EINVAL;
- if (n_ports > DP_MAX_PORTS || group >= DP_MAX_GROUPS)
- goto error;
-
- error = -ENOMEM;
- new_group = kmalloc(sizeof *new_group + sizeof(u16) * n_ports, GFP_KERNEL);
- if (!new_group)
- goto error;
-
- new_group->n_ports = n_ports;
- error = -EFAULT;
- if (copy_from_user(new_group->ports, ports, sizeof(u16) * n_ports))
- goto error_free;
-
- old_group = rcu_dereference(dp->groups[group]);
- rcu_assign_pointer(dp->groups[group], new_group);
- if (old_group)
- call_rcu(&old_group->rcu, free_port_group);
- return 0;
-
-error_free:
- kfree(new_group);
-error:
- return error;
-}
-
-static int
-set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
-{
- struct odp_port_group pg;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
-
- return do_set_port_group(dp, pg.ports, pg.n_ports, pg.group);
-}
-
-static int
-do_get_port_group(struct datapath *dp,
- u16 __user *ports, int n_ports, int group,
- u16 __user *n_portsp)
-{
- struct dp_port_group *g;
- u16 n_copy;
-
- if (group >= DP_MAX_GROUPS)
- return -EINVAL;
-
- g = dp->groups[group];
- n_copy = g ? min_t(int, g->n_ports, n_ports) : 0;
- if (n_copy && copy_to_user(ports, g->ports, n_copy * sizeof(u16)))
- return -EFAULT;
-
- if (put_user(g ? g->n_ports : 0, n_portsp))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_port_group(struct datapath *dp, struct odp_port_group __user *upg)
-{
- struct odp_port_group pg;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
-
- return do_get_port_group(dp, pg.ports, pg.n_ports, pg.group, &upg->n_ports);
-}
-
static int get_listen_mask(const struct file *f)
{
return (long)f->private_data;
err = destroy_dp(dp_idx);
goto exit;
- case ODP_PORT_ATTACH:
+ case ODP_VPORT_ATTACH:
err = attach_port(dp_idx, (struct odp_port __user *)argp);
goto exit;
- case ODP_PORT_DETACH:
+ case ODP_VPORT_DETACH:
err = get_user(port_no, (int __user *)argp);
if (!err)
err = detach_port(dp_idx, port_no);
goto exit;
- case ODP_VPORT_ADD:
- err = vport_user_add((struct odp_vport_add __user *)argp);
- goto exit;
-
case ODP_VPORT_MOD:
- err = vport_user_mod((struct odp_vport_mod __user *)argp);
- goto exit;
-
- case ODP_VPORT_DEL:
- err = vport_user_del((char __user *)argp);
+ err = vport_user_mod((struct odp_port __user *)argp);
goto exit;
case ODP_VPORT_STATS_GET:
dp->sflow_probability = sflow_probability;
break;
- case ODP_PORT_QUERY:
+ case ODP_VPORT_QUERY:
err = query_port(dp, (struct odp_port __user *)argp);
break;
- case ODP_PORT_LIST:
+ case ODP_VPORT_LIST:
err = list_ports(dp, (struct odp_portvec __user *)argp);
break;
- case ODP_PORT_GROUP_SET:
- err = set_port_group(dp, (struct odp_port_group __user *)argp);
- break;
-
- case ODP_PORT_GROUP_GET:
- err = get_port_group(dp, (struct odp_port_group __user *)argp);
- break;
-
case ODP_FLOW_FLUSH:
err = flush_flows(dp);
break;
return put_user(retval, &upv->n_ports);
}
-static int compat_set_port_group(struct datapath *dp, const struct compat_odp_port_group __user *upg)
-{
- struct compat_odp_port_group pg;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
-
- return do_set_port_group(dp, compat_ptr(pg.ports), pg.n_ports, pg.group);
-}
-
-static int compat_get_port_group(struct datapath *dp, struct compat_odp_port_group __user *upg)
-{
- struct compat_odp_port_group pg;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
-
- return do_get_port_group(dp, compat_ptr(pg.ports), pg.n_ports,
- pg.group, &upg->n_ports);
-}
-
static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
{
compat_uptr_t actions;
if (compat_get_flow(&uf, ufp))
return -EFAULT;
- memset(uf.key.reserved, 0, sizeof uf.key.reserved);
flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
if (!flow_node)
cbdata.uflows = flows;
cbdata.n_flows = n_flows;
cbdata.listed_flows = 0;
+
error = tbl_foreach(rcu_dereference(dp->table), compat_list_flow, &cbdata);
return error ? error : cbdata.listed_flows;
}
compat_uptr_t data;
if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
- __get_user(execute.in_port, &uexecute->in_port) ||
__get_user(actions, &uexecute->actions) ||
__get_user(execute.n_actions, &uexecute->n_actions) ||
__get_user(data, &uexecute->data) ||
return openvswitch_ioctl(f, cmd, argp);
case ODP_DP_CREATE:
- case ODP_PORT_ATTACH:
- case ODP_PORT_DETACH:
- case ODP_VPORT_DEL:
+ case ODP_VPORT_ATTACH:
+ case ODP_VPORT_DETACH:
+ case ODP_VPORT_MOD:
case ODP_VPORT_MTU_SET:
case ODP_VPORT_MTU_GET:
case ODP_VPORT_ETHER_SET:
case ODP_GET_LISTEN_MASK:
case ODP_SET_SFLOW_PROBABILITY:
case ODP_GET_SFLOW_PROBABILITY:
- case ODP_PORT_QUERY:
+ case ODP_VPORT_QUERY:
/* Ioctls that just need their pointer argument extended. */
return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
-
- case ODP_VPORT_ADD32:
- return compat_vport_user_add(compat_ptr(argp));
-
- case ODP_VPORT_MOD32:
- return compat_vport_user_mod(compat_ptr(argp));
}
dp = get_dp_locked(dp_idx);
goto exit;
switch (cmd) {
- case ODP_PORT_LIST32:
+ case ODP_VPORT_LIST32:
err = compat_list_ports(dp, compat_ptr(argp));
break;
- case ODP_PORT_GROUP_SET32:
- err = compat_set_port_group(dp, compat_ptr(argp));
- break;
-
- case ODP_PORT_GROUP_GET32:
- err = compat_get_port_group(dp, compat_ptr(argp));
- break;
-
case ODP_FLOW_PUT32:
err = compat_put_flow(dp, compat_ptr(argp));
break;
}
success:
copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
-
+
retval = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (copy_bytes == skb->len) {
__wsum csum = 0;
- int csum_start, csum_offset;
+ unsigned int csum_start, csum_offset;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- /* Until 2.6.22, the start of the transport header was
- * also the start of data to be checksummed. Linux
- * 2.6.22 introduced the csum_start field for this
- * purpose, but we should point the transport header to
- * it anyway for backward compatibility, as
- * dev_queue_xmit() does even in 2.6.28. */
- skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
+ csum_start = skb->csum_start - skb_headroom(skb);
csum_offset = skb->csum_offset;
#else
+ csum_start = skb_transport_header(skb) - skb->data;
csum_offset = skb->csum;
#endif
- csum_start = skb_transport_header(skb) - skb->data;
+ BUG_ON(csum_start >= skb_headlen(skb));
retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
copy_bytes - csum_start, &csum);
if (!retval) {
copy_bytes = csum_start;
csump = (__sum16 __user *)(buf + csum_start + csum_offset);
+
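+ /* The checksum field itself must land inside the user buffer. */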
+ BUG_ON((char *)csump + sizeof(__sum16) > buf + nbytes);
put_user(csum_fold(csum), csump);
}
} else