Use a hash table to store the ports of a datapath, allowing up to 64K ports per switch.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
Bug #2462
if (unlikely(!skb))
return -ENOMEM;
- vport = rcu_dereference(dp->ports[out_port]);
+ vport = ovs_vport_rcu(dp, out_port);
if (unlikely(!vport)) {
kfree_skb(skb);
return -ENODEV;
/* Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
- struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
+ struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
return vport->ops->get_name(vport);
}
rcu_read_lock();
- local = rcu_dereference(dp->ports[OVSP_LOCAL]);
+ local = ovs_vport_rcu(dp, OVSP_LOCAL);
if (local)
ifindex = local->ops->get_ifindex(local);
else
ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
free_percpu(dp->stats_percpu);
release_net(ovs_dp_get_net(dp));
+ kfree(dp->ports);
kobject_put(&dp->ifobj);
}
+/* Map a port number to its bucket in the datapath's @ports hash table.
+ * DP_VPORT_HASH_BUCKETS is a power of two, so masking with (BUCKETS - 1)
+ * selects the bucket directly without a modulo. */
+static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
+ u16 port_no)
+{
+ return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
+}
+
+/* Return the vport with the given port number, or NULL if none exists.
+ * Walks a single hash bucket with the RCU-safe list iterator, so callers
+ * must hold rcu_read_lock or RTNL; use the ovs_vport_rcu()/ovs_vport_rtnl*()
+ * wrappers, which assert the appropriate lock. */
+struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
+{
+ struct vport *vport;
+ struct hlist_node *n;
+ struct hlist_head *head;
+
+ head = vport_hash_bucket(dp, port_no);
+ hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
+ if (vport->port_no == port_no)
+ return vport;
+ }
+ return NULL;
+}
+
/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
vport = ovs_vport_add(parms);
if (!IS_ERR(vport)) {
struct datapath *dp = parms->dp;
+ struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
- rcu_assign_pointer(dp->ports[parms->port_no], vport);
- list_add(&vport->node, &dp->port_list);
-
+ hlist_add_head_rcu(&vport->dp_hash_node, head);
dp_ifinfo_notify(RTM_NEWLINK, vport);
}
-
return vport;
}
if (p->port_no != OVSP_LOCAL)
ovs_dp_sysfs_del_if(p);
+
dp_ifinfo_notify(RTM_DELLINK, p);
/* First drop references to device. */
- list_del(&p->node);
- rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
+ hlist_del_rcu(&p->dp_hash_node);
/* Then destroy it. */
ovs_vport_del(p);
struct datapath *dp;
struct vport *vport;
struct ovs_net *ovs_net;
- int err;
+ int err, i;
err = -EINVAL;
if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
if (dp == NULL)
goto err_unlock_rtnl;
- INIT_LIST_HEAD(&dp->port_list);
-
/* Initialize kobject for bridge. This will be added as
* /sys/class/net/<devname>/brif later, if sysfs is enabled. */
dp->ifobj.kset = NULL;
}
ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
+ dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!dp->ports) {
+ err = -ENOMEM;
+ goto err_destroy_percpu;
+ }
+
+ for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
+ INIT_HLIST_HEAD(&dp->ports[i]);
+
/* Set up our datapath device. */
parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
parms.type = OVS_VPORT_TYPE_INTERNAL;
if (err == -EBUSY)
err = -EEXIST;
- goto err_destroy_percpu;
+ goto err_destroy_ports_array;
}
reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
return 0;
err_destroy_local_port:
- ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+ ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
+err_destroy_ports_array:
+ kfree(dp->ports);
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
/* Called with genl_mutex. */
static void __dp_destroy(struct datapath *dp)
{
- struct vport *vport, *next_vport;
+ int i;
rtnl_lock();
- list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
- if (vport->port_no != OVSP_LOCAL)
- ovs_dp_detach_port(vport);
+
+ for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
+ struct vport *vport;
+ struct hlist_node *node, *n;
+
+ hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
+ if (vport->port_no != OVSP_LOCAL)
+ ovs_dp_detach_port(vport);
+ }
ovs_dp_sysfs_del_dp(dp);
list_del(&dp->list_node);
- ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+ ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
/* rtnl_unlock() will wait until all the references to devices that
* are pending unregistration have been dropped. We do it here to
if (!dp)
return ERR_PTR(-ENODEV);
- vport = rcu_dereference_rtnl(dp->ports[port_no]);
+ vport = ovs_vport_rtnl_rcu(dp, port_no);
if (!vport)
return ERR_PTR(-ENOENT);
return vport;
if (port_no >= DP_MAX_PORTS)
goto exit_unlock;
- vport = rtnl_dereference(dp->ports[port_no]);
+ vport = ovs_vport_rtnl(dp, port_no);
err = -EBUSY;
if (vport)
goto exit_unlock;
err = -EFBIG;
goto exit_unlock;
}
- vport = rtnl_dereference(dp->ports[port_no]);
+ vport = ovs_vport_rtnl(dp, port_no);
if (!vport)
break;
}
{
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
struct datapath *dp;
- u32 port_no;
- int retval;
+ int bucket = cb->args[0], skip = cb->args[1];
+ int i, j = 0;
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp)
return -ENODEV;
rcu_read_lock();
- for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
+ for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
-
- vport = rcu_dereference(dp->ports[port_no]);
- if (!vport)
- continue;
-
- if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- OVS_VPORT_CMD_NEW) < 0)
- break;
+ struct hlist_node *n;
+
+ j = 0;
+ hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
+ if (j >= skip &&
+ ovs_vport_cmd_fill_info(vport, skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
+ OVS_VPORT_CMD_NEW) < 0)
+ goto out;
+
+ j++;
+ }
+ skip = 0;
}
+out:
rcu_read_unlock();
- cb->args[0] = port_no;
- retval = skb->len;
+ cb->args[0] = i;
+ cb->args[1] = j;
- return retval;
+ return skb->len;
}
static struct genl_ops dp_vport_genl_ops[] = {
#include "vlan.h"
#include "vport.h"
-#define DP_MAX_PORTS 1024
+#define DP_MAX_PORTS USHRT_MAX
+#define DP_VPORT_HASH_BUCKETS 1024
+
#define SAMPLE_ACTION_DEPTH 3
/**
* @ifobj: Represents /sys/class/net/<devname>/brif. Protected by RTNL.
* @n_flows: Number of flows currently in flow table.
* @table: Current flow table. Protected by genl_lock and RCU.
- * @ports: Map from port number to &struct vport. %OVSP_LOCAL port
- * always exists, other ports may be %NULL. Protected by RTNL and RCU.
- * @port_list: List of all ports in @ports in arbitrary order. RTNL required
- * to iterate or modify.
+ * @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by
+ * RTNL and RCU.
* @stats_percpu: Per-CPU datapath statistics.
* @net: Reference to net namespace.
*
struct flow_table __rcu *table;
/* Switch ports. */
- struct vport __rcu *ports[DP_MAX_PORTS];
- struct list_head port_list;
+ struct hlist_head *ports;
/* Stats. */
struct dp_stats_percpu __percpu *stats_percpu;
write_pnet(&dp->net, net);
}
+struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);
+
+/* Look up a vport by port number; caller must hold rcu_read_lock.
+ * Warns once (rather than failing) if called without RCU protection. */
+static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return ovs_lookup_vport(dp, port_no);
+}
+
+/* Look up a vport by port number; caller must hold either rcu_read_lock
+ * or the RTNL lock. Warns once if neither lock is held. */
+static inline struct vport *ovs_vport_rtnl_rcu(const struct datapath *dp, int port_no)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rtnl_is_locked());
+ return ovs_lookup_vport(dp, port_no);
+}
+
+/* Look up a vport by port number; caller must hold the RTNL lock
+ * (ASSERT_RTNL splats if not). */
+static inline struct vport *ovs_vport_rtnl(const struct datapath *dp, int port_no)
+{
+ ASSERT_RTNL();
+ return ovs_lookup_vport(dp, port_no);
+}
+
extern struct notifier_block ovs_dp_device_notifier;
extern struct genl_multicast_group ovs_dp_vport_multicast_group;
extern int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
*/
int ovs_dp_sysfs_add_dp(struct datapath *dp)
{
- struct vport *vport = rtnl_dereference(dp->ports[OVSP_LOCAL]);
+ struct vport *vport = ovs_vport_rtnl(dp, OVSP_LOCAL);
struct kobject *kobj = vport->ops->get_kobj(vport);
int err;
int ovs_dp_sysfs_del_dp(struct datapath *dp)
{
- struct vport *vport = rtnl_dereference(dp->ports[OVSP_LOCAL]);
+ struct vport *vport = ovs_vport_rtnl(dp, OVSP_LOCAL);
struct kobject *kobj = vport->ops->get_kobj(vport);
#ifdef CONFIG_NET_NS
int ovs_dp_sysfs_add_if(struct vport *p)
{
struct datapath *dp = p->dp;
- struct vport *local_port = rtnl_dereference(dp->ports[OVSP_LOCAL]);
+ struct vport *local_port = ovs_vport_rtnl(dp, OVSP_LOCAL);
struct brport_attribute **a;
int err;
int actions_len = nla_len(actions);
struct sw_flow_actions *sfa;
- /* At least DP_MAX_PORTS actions are required to be able to flood a
- * packet to every port. Factor of 2 allows for setting VLAN tags,
- * etc. */
- if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
+ if (actions_len > MAX_ACTIONS_BUFSIZE)
return ERR_PTR(-EINVAL);
sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
swkey->phy.in_port = in_port;
attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
} else {
- swkey->phy.in_port = USHRT_MAX;
+ swkey->phy.in_port = DP_MAX_PORTS;
}
if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID)) {
const struct nlattr *nla;
int rem;
- *in_port = USHRT_MAX;
+ *in_port = DP_MAX_PORTS;
*tun_id = 0;
*priority = 0;
if (swkey->phy.tun_id != cpu_to_be64(0))
NLA_PUT_BE64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun_id);
- if (swkey->phy.in_port != USHRT_MAX)
+ if (swkey->phy.in_port != DP_MAX_PORTS)
NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
struct {
__be64 tun_id; /* Encapsulating tunnel ID. */
u32 priority; /* Packet QoS priority. */
- u16 in_port; /* Input switch port (or USHRT_MAX). */
+ u16 in_port; /* Input switch port (or DP_MAX_PORTS). */
} phy;
struct {
u8 src[ETH_ALEN]; /* Ethernet source address. */
int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
const struct nlattr *);
+#define MAX_ACTIONS_BUFSIZE (16 * 1024)
#define TBL_MIN_BUCKETS 1024
struct flow_table {
vport->port_no = parms->port_no;
vport->upcall_pid = parms->upcall_pid;
vport->ops = ops;
+ INIT_HLIST_NODE(&vport->dp_hash_node);
/* Initialize kobject for bridge. This will be added as
* /sys/class/net/<devname>/brport later, if sysfs is enabled. */
* @upcall_pid: The Netlink port to use for packets received on this port that
* miss the flow table.
* @hash_node: Element in @dev_table hash table in vport.c.
+ * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
* @ops: Class structure.
* @percpu_stats: Points to per-CPU statistics used and maintained by vport
* @stats_lock: Protects @err_stats and @offset_stats.
u32 upcall_pid;
struct hlist_node hash_node;
+ struct hlist_node dp_hash_node;
const struct vport_ops *ops;
struct vport_percpu_stats __percpu *percpu_stats;
#include "vlog.h"
VLOG_DEFINE_THIS_MODULE(dpif_linux);
-
-enum { LRU_MAX_PORTS = 1024 };
-enum { LRU_MASK = LRU_MAX_PORTS - 1};
-BUILD_ASSERT_DECL(IS_POW2(LRU_MAX_PORTS));
+enum { MAX_PORTS = USHRT_MAX };
enum { N_UPCALL_SOCKS = 16 };
BUILD_ASSERT_DECL(IS_POW2(N_UPCALL_SOCKS));
struct nln_notifier *port_notifier;
bool change_error;
- /* Queue of unused ports. */
- unsigned long *lru_bitmap;
- uint16_t lru_ports[LRU_MAX_PORTS];
- size_t lru_head;
- size_t lru_tail;
+ /* Port number allocation. */
+ uint16_t alloc_port_no;
};
static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
return CONTAINER_OF(dpif, struct dpif_linux, dpif);
}
-static void
-dpif_linux_push_port(struct dpif_linux *dp, uint16_t port)
-{
- if (port < LRU_MAX_PORTS && !bitmap_is_set(dp->lru_bitmap, port)) {
- bitmap_set1(dp->lru_bitmap, port);
- dp->lru_ports[dp->lru_head++ & LRU_MASK] = port;
- }
-}
-
-static uint32_t
-dpif_linux_pop_port(struct dpif_linux *dp)
-{
- uint16_t port;
-
- if (dp->lru_head == dp->lru_tail) {
- return UINT32_MAX;
- }
-
- port = dp->lru_ports[dp->lru_tail++ & LRU_MASK];
- bitmap_set0(dp->lru_bitmap, port);
- return port;
-}
-
static int
dpif_linux_enumerate(struct sset *all_dps)
{
open_dpif(const struct dpif_linux_dp *dp, struct dpif **dpifp)
{
struct dpif_linux *dpif;
- int i;
dpif = xzalloc(sizeof *dpif);
dpif->port_notifier = nln_notifier_create(nln, dpif_linux_port_changed,
dpif->dp_ifindex = dp->dp_ifindex;
sset_init(&dpif->changed_ports);
*dpifp = &dpif->dpif;
-
- dpif->lru_bitmap = bitmap_allocate(LRU_MAX_PORTS);
- bitmap_set1(dpif->lru_bitmap, OVSP_LOCAL);
- for (i = 1; i < LRU_MAX_PORTS; i++) {
- dpif_linux_push_port(dpif, i);
- }
}
static void
nln_notifier_destroy(dpif->port_notifier);
destroy_upcall_socks(dpif);
sset_destroy(&dpif->changed_ports);
- free(dpif->lru_bitmap);
free(dpif);
}
struct dpif_linux_vport request, reply;
const struct ofpbuf *options;
struct ofpbuf *buf;
- int error;
+ int error, i = 0, max_ports = MAX_PORTS;
dpif_linux_vport_init(&request);
request.cmd = OVS_VPORT_CMD_NEW;
do {
uint32_t upcall_pid;
- request.port_no = dpif_linux_pop_port(dpif);
+ request.port_no = ++dpif->alloc_port_no;
upcall_pid = dpif_linux_port_get_pid(dpif_, request.port_no);
request.upcall_pid = &upcall_pid;
error = dpif_linux_vport_transact(&request, &reply, &buf);
*port_nop = reply.port_no;
VLOG_DBG("%s: assigning port %"PRIu32" to netlink pid %"PRIu32,
dpif_name(dpif_), request.port_no, upcall_pid);
+ } else if (error == EFBIG) {
+ /* Older datapath has lower limit. */
+ max_ports = dpif->alloc_port_no;
+ dpif->alloc_port_no = 0;
}
+
ofpbuf_delete(buf);
- } while (request.port_no != UINT32_MAX
+ } while ((i++ < max_ports)
&& (error == EBUSY || error == EFBIG));
return error;
vport.port_no = port_no;
error = dpif_linux_vport_transact(&vport, NULL, NULL);
- if (!error) {
- dpif_linux_push_port(dpif, port_no);
- }
return error;
}
static int
dpif_linux_get_max_ports(const struct dpif *dpif OVS_UNUSED)
{
- /* If the datapath increases its range of supported ports, then it should
- * start reporting that. */
- return 1024;
+ return MAX_PORTS;
}
static uint32_t
struct dpif_linux_port_state {
struct nl_dump dump;
- unsigned long *port_bitmap; /* Ports in the datapath. */
- bool complete; /* Dump completed without error. */
};
static int
struct ofpbuf *buf;
*statep = state = xmalloc(sizeof *state);
- state->port_bitmap = bitmap_allocate(LRU_MAX_PORTS);
- state->complete = false;
dpif_linux_vport_init(&request);
request.cmd = OVS_DP_CMD_GET;
int error;
if (!nl_dump_next(&state->dump, &buf)) {
- state->complete = true;
return EOF;
}
return error;
}
- if (vport.port_no < LRU_MAX_PORTS) {
- bitmap_set1(state->port_bitmap, vport.port_no);
- }
-
dpif_port->name = (char *) vport.name;
dpif_port->type = (char *) netdev_vport_get_netdev_type(&vport);
dpif_port->port_no = vport.port_no;
}
static int
-dpif_linux_port_dump_done(const struct dpif *dpif_, void *state_)
+dpif_linux_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
{
- struct dpif_linux *dpif = dpif_linux_cast(dpif_);
struct dpif_linux_port_state *state = state_;
int error = nl_dump_done(&state->dump);
- if (state->complete) {
- uint16_t i;
-
- for (i = 0; i < LRU_MAX_PORTS; i++) {
- if (!bitmap_is_set(state->port_bitmap, i)) {
- dpif_linux_push_port(dpif, i);
- }
- }
- }
-
- free(state->port_bitmap);
free(state);
return error;
}