int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
-int (*dp_add_dp_hook)(struct datapath *dp);
-EXPORT_SYMBOL(dp_add_dp_hook);
-
-int (*dp_del_dp_hook)(struct datapath *dp);
-EXPORT_SYMBOL(dp_del_dp_hook);
-
-int (*dp_add_if_hook)(struct net_bridge_port *p);
-EXPORT_SYMBOL(dp_add_if_hook);
-
-int (*dp_del_if_hook)(struct net_bridge_port *p);
-EXPORT_SYMBOL(dp_del_if_hook);
-
/* Datapaths. Protected on the read side by rcu_read_lock, on the write side
* by dp_mutex. dp_mutex is almost completely redundant with genl_mutex
* maintained by the Generic Netlink code, but the timeout path needs mutual
kfree_skb(skb);
goto errout;
}
- err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
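+ /* rtnl_notify() no longer returns an error here; only failures before
+ * this point reach errout with err set. */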
+ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+ return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
+static void release_dp(struct kobject *kobj)
+{
+ struct datapath *dp = container_of(kobj, struct datapath, ifobj);
+ kfree(dp);
+}
+
+struct kobj_type dp_ktype = {
+ .release = release_dp
+};
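+
+/* Sketch of the intended lifetime (illustrative, using this patch's names):
+ *
+ * kobject_init(&dp->ifobj, &dp_ktype); refcount = 1
+ * kobject_get(&dp->ifobj); e.g. while sysfs holds it
+ * kobject_put(&dp->ifobj); drop that reference
+ * kobject_put(&dp->ifobj); 1 -> 0: release_dp() runs and
+ * kfree()s the datapath
+ *
+ * This is why destroy_dp() below ends in kobject_put() rather than a bare
+ * kfree(): another holder of the kobject may still be using the memory. */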
+
static int create_dp(int dp_idx, const char __user *devnamep)
{
struct net_device *dp_dev;
skb_queue_head_init(&dp->queues[i]);
init_waitqueue_head(&dp->waitqueue);
+ /* Initialize kobject for bridge. This will be added as
+ * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
+ dp->ifobj.kset = NULL;
+ kobject_init(&dp->ifobj, &dp_ktype);
+
/* Allocate table. */
err = -ENOMEM;
rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
mutex_unlock(&dp_mutex);
rtnl_unlock();
- if (dp_add_dp_hook)
- dp_add_dp_hook(dp);
+ dp_sysfs_add_dp(dp);
return 0;
if (p->port_no != ODPP_LOCAL)
dp_del_port(p);
- if (dp_del_dp_hook)
- dp_del_dp_hook(dp);
+ dp_sysfs_del_dp(dp);
rcu_assign_pointer(dps[dp->dp_idx], NULL);
for (i = 0; i < DP_MAX_GROUPS; i++)
kfree(dp->groups[i]);
free_percpu(dp->stats_percpu);
- kfree(dp);
+ kobject_put(&dp->ifobj);
module_put(THIS_MODULE);
}
return err;
}
+static void release_nbp(struct kobject *kobj)
+{
+ struct net_bridge_port *p = container_of(kobj, struct net_bridge_port, kobj);
+ kfree(p);
+}
+
+struct kobj_type brport_ktype = {
+#ifdef CONFIG_SYSFS
+ .sysfs_ops = &brport_sysfs_ops,
+#endif
+ .release = release_nbp
+};
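+
+/* Illustrative note: container_of() in release_nbp() recovers the enclosing
+ * port from its embedded kobject by subtracting the member offset, roughly
+ *
+ * (struct net_bridge_port *)((char *)kobj -
+ * offsetof(struct net_bridge_port, kobj))
+ *
+ * which is valid only because the kobject is embedded in the structure. */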
+
/* Called with RTNL lock and dp_mutex. */
static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no)
{
list_add_rcu(&p->node, &dp->port_list);
dp->n_ports++;
+ /* Initialize kobject for bridge port. This will be added as
+ * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
+ p->kobj.kset = NULL;
+ kobject_init(&p->kobj, &brport_ktype);
+
dp_ifinfo_notify(RTM_NEWLINK, p);
return 0;
if (copy_from_user(&port, portp, sizeof port))
goto out;
port.devname[IFNAMSIZ - 1] = '\0';
- port_no = port.port;
-
- err = -EINVAL;
- if (port_no < 0 || port_no >= DP_MAX_PORTS)
- goto out;
rtnl_lock();
dp = get_dp_locked(dp_idx);
if (!dp)
goto out_unlock_rtnl;
- err = -EEXIST;
- if (dp->ports[port_no])
- goto out_unlock_dp;
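+ /* Pick the lowest unused port number. Port 0 is ODPP_LOCAL, the
+ * datapath's own internal port, so the scan starts at 1. */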
+ for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
+ if (!dp->ports[port_no])
+ goto got_port_no;
+ err = -EXFULL;
+ goto out_unlock_dp;
+got_port_no:
if (!(port.flags & ODP_PORT_INTERNAL)) {
err = -ENODEV;
dev = dev_get_by_name(&init_net, port.devname);
if (err)
goto out_put;
- if (dp_add_if_hook)
- dp_add_if_hook(dp->ports[port_no]);
+ dp_sysfs_add_if(dp->ports[port_no]);
+
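+ /* Report the kernel-chosen port number back to userspace. */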
+ err = __put_user(port_no, &portp->port);
out_put:
dev_put(dev);
{
ASSERT_RTNL();
-#ifdef SUPPORT_SYSFS
- if (p->port_no != ODPP_LOCAL && dp_del_if_hook)
- sysfs_remove_link(&p->dp->ifobj, p->dev->name);
-#endif
+ if (p->port_no != ODPP_LOCAL)
+ dp_sysfs_del_if(p);
dp_ifinfo_notify(RTM_DELLINK, p);
p->dp->n_ports--;
/* Then wait until no one is still using it, and destroy it. */
synchronize_rcu();
- if (is_dp_dev(p->dev)) {
+ if (is_dp_dev(p->dev))
dp_dev_destroy(p->dev);
- }
- if (p->port_no != ODPP_LOCAL && dp_del_if_hook) {
- dp_del_if_hook(p);
- } else {
- dev_put(p->dev);
- kfree(p);
- }
+ dev_put(p->dev);
+ kobject_put(&p->kobj);
return 0;
}
#error
#endif
-#ifdef CONFIG_XEN
+#if defined(CONFIG_XEN) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
/* This code is copied verbatim from net/core/dev.c in Xen's
* linux-2.6.18-92.1.10.el5.xs5.0.0.394.644. We can't call those functions
* directly because they aren't exported. */
}
}
-int skb_checksum_setup(struct sk_buff *skb)
+int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
if (skb->proto_csum_blank) {
if (skb->protocol != htons(ETH_P_IP))
out:
return -EPROTO;
}
-#endif
+#else
+int vswitch_skb_checksum_setup(struct sk_buff *skb)
+{
+ return 0;
+}
+#endif /* CONFIG_XEN && linux == 2.6.18 */
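+
+/* The #else stub above keeps vswitch_skb_checksum_setup() callable
+ * unconditionally; on non-Xen kernels it is simply a no-op. */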
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
* the non-Xen case, but it is difficult to trigger or test this case
* there, hence the WARN_ON_ONCE().
*/
- err = skb_checksum_setup(skb);
+ err = vswitch_skb_checksum_setup(skb);
if (err)
goto err_kfree_skb;
#ifndef CHECKSUM_HW
stats->n_bytes = flow->byte_count;
stats->ip_tos = flow->ip_tos;
stats->tcp_flags = flow->tcp_flags;
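+ /* Successful queries must clear stats.error: the same field reports
+ * per-flow lookup failures (see the ENOENT case in query_flows). */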
+ stats->error = 0;
}
static void clear_stats(struct sw_flow *flow)
if (!n_actions)
return 0;
- if (ufp->n_actions > INT_MAX / sizeof(union odp_action))
- return -EINVAL;
sf_acts = rcu_dereference(flow->sf_acts);
if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
return put_actions(flow, ufp);
}
-static int del_or_query_flow(struct datapath *dp,
- struct odp_flow __user *ufp,
- unsigned int cmd)
+static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
struct dp_table *table = rcu_dereference(dp->table);
struct odp_flow uf;
if (!flow)
goto error;
- if (cmd == ODP_FLOW_DEL) {
- /* XXX redundant lookup */
- error = dp_table_delete(table, flow);
- if (error)
- goto error;
+ /* XXX redundant lookup */
+ error = dp_table_delete(table, flow);
+ if (error)
+ goto error;
- /* XXX These statistics might lose a few packets, since other
- * CPUs can be using this flow. We used to synchronize_rcu()
- * to make sure that we get completely accurate stats, but that
- * blows our performance, badly. */
- dp->n_flows--;
- error = answer_query(flow, ufp);
- flow_deferred_free(flow);
- } else {
- error = answer_query(flow, ufp);
- }
+ /* XXX These statistics might lose a few packets, since other CPUs can
+ * be using this flow. We used to synchronize_rcu() to make sure that
+ * we get completely accurate stats, but that blows our performance,
+ * badly. */
+ dp->n_flows--;
+ error = answer_query(flow, ufp);
+ flow_deferred_free(flow);
error:
return error;
}
-static int query_multiple_flows(struct datapath *dp,
- const struct odp_flowvec *flowvec)
+static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
struct dp_table *table = rcu_dereference(dp->table);
int i;
flow = dp_table_lookup(table, &uf.key);
if (!flow)
- error = __clear_user(&ufp->stats, sizeof ufp->stats);
+ error = __put_user(ENOENT, &ufp->stats.error);
else
error = answer_query(flow, ufp);
if (error)
return err;
}
-static int
-get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
+static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
struct odp_stats stats;
int i;
break;
}
}
- return put_user(idx, &pvp->n_ports);
+ return put_user(dp->n_ports, &pvp->n_ports);
}
/* RCU callback for freeing a dp_port_group */
/* Handle commands with special locking requirements up front. */
switch (cmd) {
case ODP_DP_CREATE:
- return create_dp(dp_idx, (char __user *)argp);
+ err = create_dp(dp_idx, (char __user *)argp);
+ goto exit;
case ODP_DP_DESTROY:
- return destroy_dp(dp_idx);
+ err = destroy_dp(dp_idx);
+ goto exit;
case ODP_PORT_ADD:
- return add_port(dp_idx, (struct odp_port __user *)argp);
+ err = add_port(dp_idx, (struct odp_port __user *)argp);
+ goto exit;
case ODP_PORT_DEL:
err = get_user(port_no, (int __user *)argp);
- if (err)
- break;
- return del_port(dp_idx, port_no);
+ if (!err)
+ err = del_port(dp_idx, port_no);
+ goto exit;
}
dp = get_dp_locked(dp_idx);
+ err = -ENODEV;
if (!dp)
- return -ENODEV;
+ goto exit;
switch (cmd) {
case ODP_DP_STATS:
break;
case ODP_FLOW_DEL:
- case ODP_FLOW_GET:
- err = del_or_query_flow(dp, (struct odp_flow __user *)argp,
- cmd);
+ err = del_flow(dp, (struct odp_flow __user *)argp);
break;
- case ODP_FLOW_GET_MULTIPLE:
- err = do_flowvec_ioctl(dp, argp, query_multiple_flows);
+ case ODP_FLOW_GET:
+ err = do_flowvec_ioctl(dp, argp, query_flows);
break;
case ODP_FLOW_LIST:
break;
}
mutex_unlock(&dp->mutex);
+exit:
return err;
}