These steps always occur together and in sequence, so we might as well combine them.
This expands the region over which the vport_lock is held. I didn't
carefully verify that this was OK.
This also eliminates the synchronize_rcu() call from the destruction of tunnel
vports, since as far as I can tell they do not need it.
It should be possible to eliminate the synchronize_rcu() from the netdev,
patch, and internal_dev vports, but this commit does not do that.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
{
struct vport_parms parms;
struct vport *vport;
- int err;
parms.name = odp_port->devname;
parms.type = odp_port->type;
if (IS_ERR(vport))
return PTR_ERR(vport);
- err = vport_attach(vport);
- if (err) {
- vport_del(vport);
- return err;
- }
-
rcu_assign_pointer(dp->ports[port_no], vport);
list_add_rcu(&vport->node, &dp->port_list);
dp->n_ports++;
list_del_rcu(&p->node);
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
- err = vport_detach(p);
- if (err)
- return err;
-
- /* Then wait until no one is still using it, and destroy it. */
- synchronize_rcu();
-
+ /* Then destroy it. */
vport_lock();
- vport_del(p);
+ err = vport_del(p);
vport_unlock();
- return 0;
+ return err;
}
static int detach_port(int dp_idx, int port_no)
switch (event) {
case NETDEV_UNREGISTER:
- mutex_lock(&dp->mutex);
- dp_detach_port(vport);
- mutex_unlock(&dp->mutex);
+ if (!is_internal_dev(dev)) {
+ mutex_lock(&dp->mutex);
+ dp_detach_port(vport);
+ mutex_unlock(&dp->mutex);
+ }
break;
case NETDEV_CHANGENAME:
#include "vport-netdev.h"
struct internal_dev {
- struct vport *attached_vport, *vport;
+ struct vport *vport;
struct net_device_stats stats;
};
struct vport *vport = internal_dev_get_vport(netdev);
strcpy(info->driver, "openvswitch");
-
- if (!vport)
- return;
-
sprintf(info->bus_info, "%d.%d", vport->dp->dp_idx, vport->port_no);
}
if (new_mtu < 68)
return -EINVAL;
- if (vport && new_mtu > dp_min_mtu(vport->dp))
+ if (new_mtu > dp_min_mtu(vport->dp))
return -EINVAL;
netdev->mtu = new_mtu;
if (err)
goto error_free_netdev;
+ dev_set_promiscuity(netdev_vport->dev, 1);
+ netif_start_queue(netdev_vport->dev);
+
return vport;
error_free_netdev:
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- unregister_netdevice(netdev_vport->dev);
- vport_free(vport);
-
- return 0;
-}
-
-static int internal_dev_attach(struct vport *vport)
-{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- struct internal_dev *internal_dev = internal_dev_priv(netdev_vport->dev);
-
- rcu_assign_pointer(internal_dev->attached_vport, internal_dev->vport);
- dev_set_promiscuity(netdev_vport->dev, 1);
- netif_start_queue(netdev_vport->dev);
-
- return 0;
-}
-
-static int internal_dev_detach(struct vport *vport)
-{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- struct internal_dev *internal_dev = internal_dev_priv(netdev_vport->dev);
-
netif_stop_queue(netdev_vport->dev);
dev_set_promiscuity(netdev_vport->dev, -1);
- rcu_assign_pointer(internal_dev->attached_vport, NULL);
+
+ synchronize_rcu();
+
+ unregister_netdevice(netdev_vport->dev);
+ vport_free(vport);
return 0;
}
.flags = VPORT_F_REQUIRED | VPORT_F_GEN_STATS | VPORT_F_FLOW,
.create = internal_dev_create,
.destroy = internal_dev_destroy,
- .attach = internal_dev_attach,
- .detach = internal_dev_detach,
.set_mtu = netdev_set_mtu,
.set_addr = netdev_set_addr,
.get_name = netdev_get_name,
if (!is_internal_dev(netdev))
return NULL;
-
+
internal_dev = internal_dev_priv(netdev);
- return rcu_dereference(internal_dev->attached_vport);
+ return rcu_dereference(internal_dev->vport);
}
vport_set_stats(vport, &stats);
}
+ err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+ vport);
+ if (err)
+ goto error_put;
+
+ dev_set_promiscuity(netdev_vport->dev, 1);
+ dev_disable_lro(netdev_vport->dev);
+ netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+
return vport;
error_put:
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- dev_put(netdev_vport->dev);
- vport_free(vport);
-
- return 0;
-}
-
-static int netdev_attach(struct vport *vport)
-{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- int err;
-
- err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
- vport);
- if (err)
- return err;
-
- dev_set_promiscuity(netdev_vport->dev, 1);
- dev_disable_lro(netdev_vport->dev);
- netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
-
- return 0;
-}
-
-static int netdev_detach(struct vport *vport)
-{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(netdev_vport->dev);
dev_set_promiscuity(netdev_vport->dev, -1);
+ synchronize_rcu();
+
+ dev_put(netdev_vport->dev);
+ vport_free(vport);
+
return 0;
}
.exit = netdev_exit,
.create = netdev_create,
.destroy = netdev_destroy,
- .attach = netdev_attach,
- .detach = netdev_detach,
.set_mtu = netdev_set_mtu,
.set_addr = netdev_set_addr,
.get_name = netdev_get_name,
static struct hlist_head *peer_table;
#define PEER_HASH_BUCKETS 256
+static void update_peers(const char *name, struct vport *);
+
static inline struct patch_vport *patch_vport_priv(const struct vport *vport)
{
return vport_priv(vport);
* bottleneck on systems using jumbo frames. */
patch_vport->devconf->mtu = 65535;
+ hlist_add_head(&patch_vport->hash_node, hash_bucket(patch_vport->peer_name));
+
+ rcu_assign_pointer(patch_vport->peer, vport_locate(patch_vport->peer_name));
+ update_peers(patch_vport->name, vport);
+
return vport;
error_free_vport:
{
struct patch_vport *patch_vport = patch_vport_priv(vport);
+ update_peers(patch_vport->name, NULL);
+ rcu_assign_pointer(patch_vport->peer, NULL);
+
+ hlist_del(&patch_vport->hash_node);
+
+ synchronize_rcu();
+
kfree(patch_vport->devconf);
vport_free(vport);
rcu_assign_pointer(peer_vport->peer, vport);
}
-static int patch_attach(struct vport *vport)
-{
- struct patch_vport *patch_vport = patch_vport_priv(vport);
-
- hlist_add_head(&patch_vport->hash_node, hash_bucket(patch_vport->peer_name));
-
- rcu_assign_pointer(patch_vport->peer, vport_locate(patch_vport->peer_name));
- update_peers(patch_vport->name, vport);
-
- return 0;
-}
-
-static int patch_detach(struct vport *vport)
-{
- struct patch_vport *patch_vport = patch_vport_priv(vport);
-
- update_peers(patch_vport->name, NULL);
- rcu_assign_pointer(patch_vport->peer, NULL);
-
- hlist_del(&patch_vport->hash_node);
-
- return 0;
-}
-
static int patch_set_mtu(struct vport *vport, int mtu)
{
struct patch_vport *patch_vport = patch_vport_priv(vport);
.create = patch_create,
.modify = patch_modify,
.destroy = patch_destroy,
- .attach = patch_attach,
- .detach = patch_detach,
.set_mtu = patch_set_mtu,
.set_addr = patch_set_addr,
.get_name = patch_get_name,
*
* @parms: Information about new vport.
*
- * Creates a new vport with the specified configuration (which is dependent
- * on device type). Both RTNL and vport locks must be held.
+ * Creates a new vport with the specified configuration (which is dependent on
+ * device type) and attaches it to a datapath. Both RTNL and vport locks must
+ * be held.
*/
struct vport *vport_add(const struct vport_parms *parms)
{
*
* @vport: vport to delete.
*
- * Deletes the specified device. The device must not be currently attached to
- * a datapath. It is possible to fail for reasons such as lack of memory.
- * Both RTNL and vport locks must be held.
+ * Detaches @vport from its datapath and destroys it. It is possible to fail
+ * for reasons such as lack of memory. Both RTNL and vport locks must be held.
*/
int vport_del(struct vport *vport)
{
return vport->ops->destroy(vport);
}
-/**
- * vport_attach - notify a vport that it has been attached to a datapath
- *
- * @vport: vport to attach.
- *
- * Performs vport-specific actions so that packets may be exchanged. RTNL lock
- * and the appropriate DP mutex must be held.
- */
-int vport_attach(struct vport *vport)
-{
- ASSERT_RTNL();
-
- if (vport->ops->attach)
- return vport->ops->attach(vport);
-
- return 0;
-}
-
-/**
- * vport_detach - detach a vport from a datapath
- *
- * @vport: vport to detach.
- *
- * Performs vport-specific actions before a vport is detached from a datapath.
- * May fail for a variety of reasons, including lack of memory. RTNL lock and
- * the appropriate DP mutex must be held.
- */
-int vport_detach(struct vport *vport)
-{
- ASSERT_RTNL();
-
- if (vport->ops->detach)
- return vport->ops->detach(vport);
- return 0;
-}
-
/**
* vport_set_mtu - set device MTU (for kernel callers)
*
struct vport *vport_locate(const char *name);
-int vport_attach(struct vport *);
-int vport_detach(struct vport *);
-
int vport_set_mtu(struct vport *, int mtu);
int vport_set_addr(struct vport *, const unsigned char *);
int vport_set_stats(struct vport *, struct rtnl_link_stats64 *);
* a new vport allocated with vport_alloc(), otherwise an ERR_PTR() value.
* @modify: Modify the configuration of an existing vport. May be null if
* modification is not supported.
- * @destroy: Destroy and free a vport using vport_free(). Prior to destruction
- * @detach will be called followed by synchronize_rcu().
- * @attach: Attach a previously created vport to a datapath. After attachment
- * packets may be sent and received. Prior to attachment any packets may be
- * silently discarded. May be null if not needed.
- * @detach: Detach a vport from a datapath. May be null if not needed.
+ * @destroy: Detach and destroy a vport.
* @set_mtu: Set the device's MTU. May be null if not supported.
* @set_addr: Set the device's MAC address. May be null if not supported.
* @set_stats: Provides stats as an offset to be added to the device stats.
int (*modify)(struct vport *, struct odp_port *);
int (*destroy)(struct vport *);
- int (*attach)(struct vport *);
- int (*detach)(struct vport *);
-
int (*set_mtu)(struct vport *, int mtu);
int (*set_addr)(struct vport *, const unsigned char *);
int (*set_stats)(const struct vport *, struct rtnl_link_stats64 *);