It's possible to start receiving packets on a datapath as soon as
the internal device is created. It's therefore important that the
datapath be fully initialized before this, which it currently isn't.
In particular, the fact that dp->stats_percpu is not yet set is
potentially fatal. In addition, if allocation of the Netlink response
failed it would leak the percpu memory. This fixes both problems.
Found by code inspection; in practice the datapath is probably always
done initializing before someone can send a packet on it.
Signed-off-by: Jesse Gross <jesse@nicira.com>
Acked-by: Ben Pfaff <blp@nicira.com>
return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
}
return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
}
+/* Returns the ifindex of 'dp''s local port (OVSP_LOCAL), or 0 if the
+ * local port does not exist (e.g. before it has been created or after
+ * it has been removed).  Safe to call from any context that permits
+ * rcu_read_lock(); the vport lookup is RCU-protected internally. */
+static int get_dpifindex(struct datapath *dp)
+{
+	struct vport *local;
+	int ifindex;
+
+	rcu_read_lock();
+
+	local = get_vport_protected(dp, OVSP_LOCAL);
+	if (local)
+		ifindex = vport_get_ifindex(local);
+	else
+		ifindex = 0;
+
+	rcu_read_unlock();
+
+	return ifindex;
+}
+
static inline size_t br_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
static inline size_t br_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
hdr->ifi_change = 0;
NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
hdr->ifi_change = 0;
NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
- NLA_PUT_U32(skb, IFLA_MASTER,
- vport_get_ifindex(get_vport_protected(dp, OVSP_LOCAL)));
+ NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
NLA_PUT_U8(skb, IFLA_OPERSTATE,
NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
NLA_PUT_U8(skb, IFLA_OPERSTATE,
#define PACKET_N_MC_GROUPS 16
static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
#define PACKET_N_MC_GROUPS 16
static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
-static u32 packet_mc_group(struct datapath *dp, u8 cmd)
+static u32 packet_mc_group(int dp_ifindex, u8 cmd)
{
u32 idx;
BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
{
u32 idx;
BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
- idx = jhash_2words(dp->dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
+ idx = jhash_2words(dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
return packet_mc_groups[idx].id;
}
return packet_mc_groups[idx].id;
}
static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
- u32 group = packet_mc_group(dp, upcall_info->cmd);
+ int dp_ifindex;
+ u32 group;
struct sk_buff *nskb;
int err;
struct sk_buff *nskb;
int err;
+ dp_ifindex = get_dpifindex(dp);
+ if (!dp_ifindex) {
+ err = -ENODEV;
+ nskb = skb->next;
+ goto err_kfree_skbs;
+ }
+
+ group = packet_mc_group(dp_ifindex, upcall_info->cmd);
+
do {
struct ovs_header *upcall;
struct sk_buff *user_skb; /* to be queued to userspace */
do {
struct ovs_header *upcall;
struct sk_buff *user_skb; /* to be queued to userspace */
}
upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
}
upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
- upcall->dp_ifindex = dp->dp_ifindex;
+ upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
flow_to_nlattrs(upcall_info->key, user_skb);
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
flow_to_nlattrs(upcall_info->key, user_skb);
if (!ovs_header)
return -EMSGSIZE;
if (!ovs_header)
return -EMSGSIZE;
- ovs_header->dp_ifindex = dp->dp_ifindex;
+ ovs_header->dp_ifindex = get_dpifindex(dp);
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
struct ovs_header *ovs_header;
struct nlattr *nla;
int err;
struct ovs_header *ovs_header;
struct nlattr *nla;
int err;
+ int dp_ifindex = get_dpifindex(dp);
ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
flags, cmd);
if (!ovs_header)
goto error;
ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
flags, cmd);
if (!ovs_header)
goto error;
- ovs_header->dp_ifindex = dp->dp_ifindex;
+ ovs_header->dp_ifindex = dp_ifindex;
rcu_read_lock();
err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
rcu_read_lock();
err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
nla = nla_nest_start(skb, OVS_DP_ATTR_MCGROUPS);
if (!nla)
goto nla_put_failure;
nla = nla_nest_start(skb, OVS_DP_ATTR_MCGROUPS);
if (!nla)
goto nla_put_failure;
- NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS, packet_mc_group(dp, OVS_PACKET_CMD_MISS));
- NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION, packet_mc_group(dp, OVS_PACKET_CMD_ACTION));
- NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE, packet_mc_group(dp, OVS_PACKET_CMD_SAMPLE));
+ NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS,
+ packet_mc_group(dp_ifindex, OVS_PACKET_CMD_MISS));
+ NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION,
+ packet_mc_group(dp_ifindex, OVS_PACKET_CMD_ACTION));
+ NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE,
+ packet_mc_group(dp_ifindex, OVS_PACKET_CMD_SAMPLE));
nla_nest_end(skb, nla);
return genlmsg_end(skb, ovs_header);
nla_nest_end(skb, nla);
return genlmsg_end(skb, ovs_header);
if (!dp->table)
goto err_free_dp;
if (!dp->table)
goto err_free_dp;
+ dp->drop_frags = 0;
+ dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+ if (!dp->stats_percpu) {
+ err = -ENOMEM;
+ goto err_destroy_table;
+ }
+
+ change_datapath(dp, a);
+
/* Set up our datapath device. */
parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
parms.type = OVS_VPORT_TYPE_INTERNAL;
/* Set up our datapath device. */
parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
parms.type = OVS_VPORT_TYPE_INTERNAL;
if (err == -EBUSY)
err = -EEXIST;
if (err == -EBUSY)
err = -EEXIST;
- goto err_destroy_table;
+ goto err_destroy_percpu;
- dp->dp_ifindex = vport_get_ifindex(vport);
-
- dp->drop_frags = 0;
- dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
- if (!dp->stats_percpu) {
- err = -ENOMEM;
- goto err_destroy_local_port;
- }
-
- change_datapath(dp, a);
reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
err = PTR_ERR(reply);
reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
err = PTR_ERR(reply);
err_destroy_local_port:
dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
err_destroy_local_port:
dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
+err_destroy_percpu:
+ free_percpu(dp->stats_percpu);
err_destroy_table:
flow_tbl_destroy(get_table_protected(dp));
err_free_dp:
err_destroy_table:
flow_tbl_destroy(get_table_protected(dp));
err_free_dp:
if (!ovs_header)
return -EMSGSIZE;
if (!ovs_header)
return -EMSGSIZE;
- ovs_header->dp_ifindex = vport->dp->dp_ifindex;
+ ovs_header->dp_ifindex = get_dpifindex(vport->dp);
NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
/**
* struct datapath - datapath for flow-based packet switching
* @rcu: RCU callback head for deferred destruction.
/**
* struct datapath - datapath for flow-based packet switching
* @rcu: RCU callback head for deferred destruction.
- * @dp_ifindex: ifindex of local port.
* @list_node: Element in global 'dps' list.
* @ifobj: Represents /sys/class/net/<devname>/brif. Protected by RTNL.
* @drop_frags: Drop all IP fragments if nonzero.
* @list_node: Element in global 'dps' list.
* @ifobj: Represents /sys/class/net/<devname>/brif. Protected by RTNL.
* @drop_frags: Drop all IP fragments if nonzero.
*/
struct datapath {
struct rcu_head rcu;
*/
struct datapath {
struct rcu_head rcu;
struct list_head list_node;
struct kobject ifobj;
struct list_head list_node;
struct kobject ifobj;