#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
static struct genl_family dp_genl_family;
-static struct genl_multicast_group mc_group;
+
+/*
+ * Datapath multicast groups.
+ *
+ * Really we want one multicast group per in-use datapath (or even more than
+ * one). Locking issues, however, mean that we can't allocate a multicast
+ * group at the point in the code where we actually create a datapath[*], so
+ * we have to pre-allocate them. It's massive overkill to allocate DP_MAX of
+ * them in advance, since we will hardly ever actually create DP_MAX datapaths,
+ * so instead we allocate a few multicast groups at startup and choose one for
+ * each datapath by hashing its datapath index.
+ *
+ * [*] dp_genl_add, to add a new datapath, is called under the genl_lock
+ * mutex, and genl_register_mc_group, called to acquire a new multicast
+ * group ID, also acquires genl_lock, thus deadlock.
+ */
+#define N_MC_GROUPS 16 /* Must be a power of 2. */
+static struct genl_multicast_group mc_groups[N_MC_GROUPS];
/* Datapaths. Protected on the read side by rcu_read_lock, on the write side
* by dp_mutex. dp_mutex is almost completely redundant with genl_mutex
return oh;
}
+/* Returns the ID of the multicast group used by datapath 'dp'. */
+static u32
+dp_mc_group(const struct datapath *dp)
+{
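+	/* N_MC_GROUPS is a power of 2, so masking with N_MC_GROUPS - 1 is
+	 * equivalent to dp_idx % N_MC_GROUPS.  Datapaths whose indexes fall
+	 * into the same slot simply share a multicast group. */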
+ return mc_groups[dp->dp_idx & (N_MC_GROUPS - 1)].id;
+}
+
-/* Sends 'skb' to 'sender' if it is nonnull, otherwise multicasts 'skb' to all
- * listeners. */
+/* Sends 'skb' to 'sender' if it is nonnull, otherwise multicasts 'skb' to all
+ * listeners on the multicast group used by 'dp'. */
static int
-send_openflow_skb(struct sk_buff *skb, const struct sender *sender)
+send_openflow_skb(const struct datapath *dp,
+ struct sk_buff *skb, const struct sender *sender)
{
return (sender
? genlmsg_unicast(skb, sender->pid)
- : genlmsg_multicast(skb, 0, mc_group.id, GFP_ATOMIC));
+ : genlmsg_multicast(skb, 0, dp_mc_group(dp), GFP_ATOMIC));
}
/* Retrieves the datapath id, which is the MAC address of the "of" device. */
opi->reason = reason;
opi->pad = 0;
skb_copy_bits(skb, 0, opi->data, fwd_len);
- err = send_openflow_skb(f_skb, NULL);
+ err = send_openflow_skb(dp, f_skb, NULL);
out:
kfree_skb(skb);
/* Shrink to fit. */
ofr_len = sizeof(*ofr) + (sizeof(struct ofp_phy_port) * port_count);
resize_openflow_skb(skb, &ofr->header, ofr_len);
- return send_openflow_skb(skb, sender);
+ return send_openflow_skb(dp, skb, sender);
}
int
osc->flags = htons(dp->flags);
osc->miss_send_len = htons(dp->miss_send_len);
- return send_openflow_skb(skb, sender);
+ return send_openflow_skb(dp, skb, sender);
}
int
if (!reply)
return -ENOMEM;
- return send_openflow_skb(skb, sender);
+ return send_openflow_skb(dp, skb, sender);
}
}
memset(ops->pad, 0, sizeof ops->pad);
fill_port_desc(p, &ops->desc);
- return send_openflow_skb(skb, NULL);
+ return send_openflow_skb(p->dp, skb, NULL);
}
/* Convert jiffies_64 to milliseconds. */
nfe->packet_count = cpu_to_be64(flow->packet_count);
nfe->byte_count = cpu_to_be64(flow->byte_count);
- return send_openflow_skb(skb, NULL);
+ return send_openflow_skb(dp, skb, NULL);
}
EXPORT_SYMBOL(dp_send_flow_end);
oem->code = htons(code);
memcpy(oem->data, data, len);
- return send_openflow_skb(skb, sender);
+ return send_openflow_skb(dp, skb, sender);
}
int
return -ENOMEM;
memcpy(reply + 1, rq + 1, ntohs(rq->length) - sizeof *rq);
- return send_openflow_skb(skb, sender);
+ return send_openflow_skb(dp, skb, sender);
}
/* Generic Netlink interface.
/* Queries a datapath for related information. Currently the only relevant
* information is the datapath's multicast group ID, datapath ID, and
- * datapath device name. Really we want one multicast group per datapath,
- * but because of locking issues[*] we can't easily get one. Thus, every
- * datapath will currently return the same global multicast group ID, but
- * in the future it would be nice to fix that.
- *
- * [*] dp_genl_add, to add a new datapath, is called under the genl_lock
- * mutex, and genl_register_mc_group, called to acquire a new multicast
- * group ID, also acquires genl_lock, thus deadlock.
- */
+ * datapath device name. */
static int dp_genl_query(struct sk_buff *skb, struct genl_info *info)
{
struct datapath *dp;
goto err;
NLA_PUT_U32(ans_skb, DP_GENL_A_DP_IDX, dp->dp_idx);
NLA_PUT_STRING(ans_skb, DP_GENL_A_DP_NAME, dp->netdev->name);
- NLA_PUT_U32(ans_skb, DP_GENL_A_MC_GROUP, mc_group.id);
+ NLA_PUT_U32(ans_skb, DP_GENL_A_MC_GROUP, dp_mc_group(dp));
genlmsg_end(ans_skb, data);
err = genlmsg_reply(ans_skb, info);
goto err_unregister;
}
- strcpy(mc_group.name, "openflow");
- err = genl_register_mc_group(&dp_genl_family, &mc_group);
- if (err < 0)
- goto err_unregister;
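+	/* Register the pre-allocated multicast groups, named "openflow0"
+	 * through "openflow15".  Userspace learns which group a particular
+	 * datapath uses from the DP_GENL_A_MC_GROUP attribute returned by
+	 * dp_genl_query(). */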
+ for (i = 0; i < N_MC_GROUPS; i++) {
+ snprintf(mc_groups[i].name, sizeof mc_groups[i].name,
+ "openflow%d", i);
+ err = genl_register_mc_group(&dp_genl_family, &mc_groups[i]);
+ if (err < 0)
+ goto err_unregister;
+ }
return 0;