 		kfree_skb(skb);
 		goto errout;
 	}
-	err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
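+	/* Send the notification; errout below is now reached only on an
+	 * earlier failure. */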
+	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+	return;
 errout:
 	if (err < 0)
 		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 	dp = kzalloc(sizeof *dp, GFP_KERNEL);
 	if (dp == NULL)
 		goto err_put_module;
-
+	INIT_LIST_HEAD(&dp->port_list);
 	mutex_init(&dp->mutex);
 	dp->dp_idx = dp_idx;
 	for (i = 0; i < DP_N_QUEUES; i++)
 		skb_queue_head_init(&dp->queues[i]);
 	init_waitqueue_head(&dp->waitqueue);
+	/* Allocate table. */
+	err = -ENOMEM;
+	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
+	if (!dp->table)
+		goto err_free_dp;
+
 	/* Setup our datapath device */
 	dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
 	err = PTR_ERR(dp_dev);
 	if (IS_ERR(dp_dev))
-		goto err_free_dp;
-
-	err = -ENOMEM;
-	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
-	if (!dp->table)
-		goto err_destroy_dp_dev;
-	INIT_LIST_HEAD(&dp->port_list);
+		goto err_destroy_table;
 	err = new_nbp(dp, dp_dev, ODPP_LOCAL);
-	if (err)
+	if (err) {
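+		/* The error labels below no longer destroy dp_dev, so clean
+		 * it up here before unwinding. */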
+		dp_dev_destroy(dp_dev);
 		goto err_destroy_table;
+	}
 	dp->drop_frags = 0;
 	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
 	return 0;
 err_destroy_local_port:
-	dp_del_port(dp->ports[ODPP_LOCAL], NULL);
+	dp_del_port(dp->ports[ODPP_LOCAL]);
 err_destroy_table:
 	dp_table_destroy(dp->table, 0);
-err_destroy_dp_dev:
-	dp_dev_destroy(dp_dev);
 err_free_dp:
 	kfree(dp);
 err_put_module:
 	return err;
 }
-static void do_destroy_dp(struct datapath *dp, struct list_head *dp_devs)
+static void do_destroy_dp(struct datapath *dp)
 {
 	struct net_bridge_port *p, *n;
 	int i;
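+	/* Delete all ports other than the local one; the local port is
+	 * removed below, once the datapath is unpublished. */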
+	list_for_each_entry_safe (p, n, &dp->port_list, node)
+		if (p->port_no != ODPP_LOCAL)
+			dp_del_port(p);
+
 	if (dp_del_dp_hook)
 		dp_del_dp_hook(dp);
-	/* Drop references to DP. */
-	list_for_each_entry_safe (p, n, &dp->port_list, node)
-		dp_del_port(p, dp_devs);
-
 	rcu_assign_pointer(dps[dp->dp_idx], NULL);
-	synchronize_rcu();
-	/* Wait until no longer in use, then destroy it. */
-	synchronize_rcu();
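+	/* With the datapath unpublished from dps[], delete the local port. */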
+	dp_del_port(dp->ports[ODPP_LOCAL]);
+
 	dp_table_destroy(dp->table, 1);
+
 	for (i = 0; i < DP_N_QUEUES; i++)
 		skb_queue_purge(&dp->queues[i]);
 	for (i = 0; i < DP_MAX_GROUPS; i++)
 static int destroy_dp(int dp_idx)
 {
-	struct dp_dev *dp_dev, *next;
 	struct datapath *dp;
-	LIST_HEAD(dp_devs);
 	int err;
 	rtnl_lock();
 	if (!dp)
 		goto err_unlock;
-	do_destroy_dp(dp, &dp_devs);
+	do_destroy_dp(dp);
 	err = 0;
 err_unlock:
 	mutex_unlock(&dp_mutex);
 	rtnl_unlock();
-	list_for_each_entry_safe (dp_dev, next, &dp_devs, list)
-		free_netdev(dp_dev->dev);
 	return err;
 }
 	if (copy_from_user(&port, portp, sizeof port))
 		goto out;
 	port.devname[IFNAMSIZ - 1] = '\0';
-	port_no = port.port;
-
-	err = -EINVAL;
-	if (port_no < 0 || port_no >= DP_MAX_PORTS)
-		goto out;
 	rtnl_lock();
 	dp = get_dp_locked(dp_idx);
 	if (!dp)
 		goto out_unlock_rtnl;
-	err = -EEXIST;
-	if (dp->ports[port_no])
-		goto out_unlock_dp;
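+	/* Pick the first free port number; port 0 (ODPP_LOCAL) is reserved
+	 * for the datapath's local device. */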
+	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
+		if (!dp->ports[port_no])
+			goto got_port_no;
+	err = -EXFULL;
+	goto out_unlock_dp;
+got_port_no:
 	if (!(port.flags & ODP_PORT_INTERNAL)) {
 		err = -ENODEV;
 		dev = dev_get_by_name(&init_net, port.devname);
 	if (dp_add_if_hook)
 		dp_add_if_hook(dp->ports[port_no]);
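+	/* Report the port number that was chosen back to userspace. */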
+	err = __put_user(port_no, &portp->port);
+
 out_put:
 	dev_put(dev);
 out_unlock_dp:
 	return err;
 }
-int dp_del_port(struct net_bridge_port *p, struct list_head *dp_devs)
+int dp_del_port(struct net_bridge_port *p)
 {
 	ASSERT_RTNL();
 	if (is_dp_dev(p->dev)) {
 		dp_dev_destroy(p->dev);
-		if (dp_devs) {
-			struct dp_dev *dp_dev = dp_dev_priv(p->dev);
-			list_add(&dp_dev->list, dp_devs);
-		}
 	}
 	if (p->port_no != ODPP_LOCAL && dp_del_if_hook) {
 		dp_del_if_hook(p);
 static int del_port(int dp_idx, int port_no)
 {
-	struct dp_dev *dp_dev, *next;
 	struct net_bridge_port *p;
 	struct datapath *dp;
-	LIST_HEAD(dp_devs);
 	if (!p)
 		goto out_unlock_dp;
-	err = dp_del_port(p, &dp_devs);
+	err = dp_del_port(p);
 out_unlock_dp:
 	mutex_unlock(&dp->mutex);
 out_unlock_rtnl:
 	rtnl_unlock();
 out:
-	list_for_each_entry_safe (dp_dev, next, &dp_devs, list)
-		free_netdev(dp_dev->dev);
 	return err;
 }
 	struct sw_flow *flow;
 	WARN_ON_ONCE(skb_shared(skb));
-	WARN_ON_ONCE(skb->destructor);
 	/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
 	stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
 	stats->n_bytes = flow->byte_count;
 	stats->ip_tos = flow->ip_tos;
 	stats->tcp_flags = flow->tcp_flags;
+	stats->error = 0;
 }
 static void clear_stats(struct sw_flow *flow)
 	if (!n_actions)
 		return 0;
-	if (ufp->n_actions > INT_MAX / sizeof(union odp_action))
-		return -EINVAL;
 	sf_acts = rcu_dereference(flow->sf_acts);
 	if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
 	return put_actions(flow, ufp);
 }
-static int del_or_query_flow(struct datapath *dp,
-			     struct odp_flow __user *ufp,
-			     unsigned int cmd)
+static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
 {
 	struct dp_table *table = rcu_dereference(dp->table);
 	struct odp_flow uf;
 	if (!flow)
 		goto error;
-	if (cmd == ODP_FLOW_DEL) {
-		/* XXX redundant lookup */
-		error = dp_table_delete(table, flow);
-		if (error)
-			goto error;
+	/* XXX redundant lookup */
+	error = dp_table_delete(table, flow);
+	if (error)
+		goto error;
 
-		/* XXX These statistics might lose a few packets, since other
-		 * CPUs can be using this flow. We used to synchronize_rcu()
-		 * to make sure that we get completely accurate stats, but that
-		 * blows our performance, badly. */
-		dp->n_flows--;
-		error = answer_query(flow, ufp);
-		flow_deferred_free(flow);
-	} else {
-		error = answer_query(flow, ufp);
-	}
+	/* XXX These statistics might lose a few packets, since other CPUs can
+	 * be using this flow. We used to synchronize_rcu() to make sure that
+	 * we get completely accurate stats, but that blows our performance,
+	 * badly. */
+	dp->n_flows--;
+	error = answer_query(flow, ufp);
+	flow_deferred_free(flow);
 error:
 	return error;
 }
-static int query_multiple_flows(struct datapath *dp,
-				const struct odp_flowvec *flowvec)
+static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
 {
 	struct dp_table *table = rcu_dereference(dp->table);
 	int i;
 		flow = dp_table_lookup(table, &uf.key);
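+		/* A missing flow is reported through the new stats.error
+		 * field so userspace can tell which entries in the vector
+		 * failed. */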
 		if (!flow)
-			error = __clear_user(&ufp->stats, sizeof ufp->stats);
+			error = __put_user(ENOENT, &ufp->stats.error);
 		else
 			error = answer_query(flow, ufp);
 		if (error)
 	return err;
 }
-static int
-get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
+static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
 {
 	struct odp_stats stats;
 	int i;
 			break;
 		}
 	}
-	return put_user(idx, &pvp->n_ports);
+	return put_user(dp->n_ports, &pvp->n_ports);
 }
 /* RCU callback for freeing a dp_port_group */
 	/* Handle commands with special locking requirements up front. */
 	switch (cmd) {
 	case ODP_DP_CREATE:
-		return create_dp(dp_idx, (char __user *)argp);
+		err = create_dp(dp_idx, (char __user *)argp);
+		goto exit;
 	case ODP_DP_DESTROY:
-		return destroy_dp(dp_idx);
+		err = destroy_dp(dp_idx);
+		goto exit;
 	case ODP_PORT_ADD:
-		return add_port(dp_idx, (struct odp_port __user *)argp);
+		err = add_port(dp_idx, (struct odp_port __user *)argp);
+		goto exit;
 	case ODP_PORT_DEL:
 		err = get_user(port_no, (int __user *)argp);
-		if (err)
-			break;
-		return del_port(dp_idx, port_no);
+		if (!err)
+			err = del_port(dp_idx, port_no);
+		goto exit;
 	}
 	dp = get_dp_locked(dp_idx);
+	err = -ENODEV;
 	if (!dp)
-		return -ENODEV;
+		goto exit;
 	switch (cmd) {
 	case ODP_DP_STATS:
 		break;
 	case ODP_FLOW_DEL:
-	case ODP_FLOW_GET:
-		err = del_or_query_flow(dp, (struct odp_flow __user *)argp,
-					cmd);
+		err = del_flow(dp, (struct odp_flow __user *)argp);
 		break;
-	case ODP_FLOW_GET_MULTIPLE:
-		err = do_flowvec_ioctl(dp, argp, query_multiple_flows);
+	case ODP_FLOW_GET:
+		err = do_flowvec_ioctl(dp, argp, query_flows);
 		break;
 	case ODP_FLOW_LIST:
 		break;
 	}
 	mutex_unlock(&dp->mutex);
+exit:
 	return err;
 }
 ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
 			 loff_t *ppos)
 {
+	/* XXX is there sufficient synchronization here? */
 	int listeners = (int) f->private_data;
 	int dp_idx = iminor(f->f_dentry->d_inode);
 	struct datapath *dp = get_dp(dp_idx);
 static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
 {
+	/* XXX is there sufficient synchronization here? */
 	int dp_idx = iminor(file->f_dentry->d_inode);
 	struct datapath *dp = get_dp(dp_idx);
 	unsigned int mask;