/* Searches 'chain' for a flow matching 'key', which must not have any wildcard
* fields. Returns the flow if successful, otherwise a null pointer.
*
- * Caller must hold rcu_read_lock, and not release it until it is done with the
- * returned flow. */
+ * Caller must hold rcu_read_lock or dp_mutex. */
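+/* Illustrative read-side caller (a sketch, not part of this change; how the
+ * flow is used is up to the caller):
+ *
+ *	rcu_read_lock();
+ *	flow = chain_lookup(chain, &key);
+ *	if (flow)
+ *		... use 'flow' ...
+ *	rcu_read_unlock();
+ *
+ * A caller that holds dp_mutex instead may skip the RCU read lock. */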
struct sw_flow *chain_lookup(struct sw_chain *chain,
const struct sw_flow_key *key)
{
* If successful, 'flow' becomes owned by the chain, otherwise it is retained
* by the caller.
*
- * Caller must hold rcu_read_lock. If insertion is successful, it must not
- * release rcu_read_lock until it is done with the inserted flow. */
+ * Caller must hold dp_mutex. */
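+/* Illustrative caller (a sketch, not part of this change; flow_free() stands
+ * in for however the caller releases a flow it still owns):
+ *
+ *	mutex_lock(&dp_mutex);
+ *	error = chain_insert(chain, flow);
+ *	mutex_unlock(&dp_mutex);
+ *	if (error)
+ *		flow_free(flow);
+ */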
int chain_insert(struct sw_chain *chain, struct sw_flow *flow)
{
int i;
* iterating through the entire contents of each table for keys that contain
* wildcards. Relatively cheap for fully specified keys.
*
- * The caller need not hold any locks. */
+ * Caller must hold dp_mutex. */
int chain_delete(struct sw_chain *chain, const struct sw_flow_key *key,
uint16_t priority, int strict)
{
for (i = 0; i < chain->n_tables; i++) {
struct sw_table *t = chain->tables[i];
- rcu_read_lock();
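+		/* The caller's dp_mutex now protects this iteration. */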
count += t->delete(t, key, priority, strict);
- rcu_read_unlock();
}
return count;
* Expensive as currently implemented, since it iterates through the entire
* contents of each table.
*
- * The caller need not hold any locks. */
+ * Caller must not hold dp_mutex, because individual tables take and release it
+ * as necessary. */
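+/* Taking dp_mutex around the call would thus self-deadlock (a sketch of the
+ * wrong usage):
+ *
+ *	mutex_lock(&dp_mutex);
+ *	chain_timeout(chain);	<-- deadlocks: tables re-take dp_mutex
+ */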
int chain_timeout(struct sw_chain *chain)
{
int count = 0;
/* It's hard to imagine wanting more than one datapath, but... */
#define DP_MAX 32
-/* datapaths. Protected on the read side by rcu_read_lock, on the write side
- * by dp_mutex.
+/* Datapaths. Protected on the read side by rcu_read_lock, on the write side
+ * by dp_mutex. dp_mutex is almost completely redundant with the genl_mutex
+ * maintained by the Generic Netlink code, but the timeout path runs outside
+ * of Generic Netlink and so needs mutual exclusion too.
*
* It is safe to access the datapath and net_bridge_port structures with just
- * the dp_mutex, but to access the chain you need to take the rcu_read_lock
- * also (because dp_mutex doesn't prevent flows from being destroyed).
+ * dp_mutex.
*/
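+/* Illustrative read-side lookup (a sketch, not part of this change; assumes
+ * 'dp_idx' has already been range-checked):
+ *
+ *	rcu_read_lock();
+ *	dp = rcu_dereference(dps[dp_idx]);
+ *	if (dp)
+ *		... use 'dp' ...
+ *	rcu_read_unlock();
+ */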
static struct datapath *dps[DP_MAX];
-static DEFINE_MUTEX(dp_mutex);
+DEFINE_MUTEX(dp_mutex);
static int dp_maint_func(void *data);
static int send_port_status(struct net_bridge_port *p, uint8_t status);
}
/* Creates a new datapath numbered 'dp_idx'. Returns 0 for success or a
- * negative error code.
- *
- * Not called with any locks. */
+ * negative error code. */
static int new_dp(int dp_idx)
{
struct datapath *dp;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
- mutex_lock(&dp_mutex);
- dp = rcu_dereference(dps[dp_idx]);
- if (dp != NULL) {
+ /* Exit early if a datapath with that number already exists. */
+ if (dps[dp_idx]) {
err = -EEXIST;
goto err_unlock;
}
if (IS_ERR(dp->dp_task))
goto err_destroy_chain;
- rcu_assign_pointer(dps[dp_idx], dp);
- mutex_unlock(&dp_mutex);
+ dps[dp_idx] = dp;
return 0;
err_free_dp:
kfree(dp);
err_unlock:
- mutex_unlock(&dp_mutex);
module_put(THIS_MODULE);
return err;
}
-/* Find and return a free port number under 'dp'. Called under dp_mutex. */
+/* Find and return a free port number under 'dp'. */
static int find_portno(struct datapath *dp)
{
int i;
return p;
}
-/* Called with dp_mutex. */
int add_switch_port(struct datapath *dp, struct net_device *dev)
{
struct net_bridge_port *p;
return 0;
}
-/* Delete 'p' from switch.
- * Called with dp_mutex. */
+/* Delete 'p' from the switch. */
static int del_switch_port(struct net_bridge_port *p)
{
/* First drop references to device. */
return 0;
}
-/* Called with dp_mutex. */
static void del_dp(struct datapath *dp)
{
struct net_bridge_port *p, *n;
}
#else
/* NB: This has only been tested on 2.4.35 */
-
-/* Called without any locks (?) */
static void dp_frame_hook(struct sk_buff *skb)
{
struct net_bridge_port *p = skb->dev->br_port;
if (!info->attrs[DP_GENL_A_DP_IDX])
return -EINVAL;
- mutex_lock(&dp_mutex);
dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));
if (!dp)
err = -ENOENT;
del_dp(dp);
err = 0;
}
- mutex_unlock(&dp_mutex);
return err;
}
return -EINVAL;
/* Get datapath. */
- mutex_lock(&dp_mutex);
dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
if (!dp) {
err = -ENOENT;
out_put:
dev_put(port);
out:
- mutex_unlock(&dp_mutex);
return err;
}
struct datapath *dp;
struct ofp_header *oh;
struct sender sender;
+ int err;
if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
return -EINVAL;
sender.xid = oh->xid;
sender.pid = info->snd_pid;
sender.seq = info->snd_seq;
- return fwd_control_input(dp->chain, &sender,
- nla_data(va), nla_len(va));
+
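+	/* fwd_control_input() may modify the flow table, so serialize it
+	 * against the per-table expiration functions, which take dp_mutex. */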
+ mutex_lock(&dp_mutex);
+ err = fwd_control_input(dp->chain, &sender,
+ nla_data(va), nla_len(va));
+ mutex_unlock(&dp_mutex);
+ return err;
}
static struct nla_policy dp_genl_openflow_policy[DP_GENL_A_MAX + 1] = {
#ifndef DATAPATH_H
#define DATAPATH_H 1
+#include <linux/mutex.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
uint32_t seq; /* Netlink sequence ID of request. */
};
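+
+/* Serializes flow-table modifications; see the comment above dps[] in
+ * datapath.c for the locking rules. */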
+extern struct mutex dp_mutex;
+
int dp_output_port(struct datapath *, struct sk_buff *, int out_port);
int dp_output_control(struct datapath *, struct sk_buff *, uint32_t,
size_t, int);
uint64_t packet_count = 0;
int i = 0;
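+	/* chain_timeout() is called without dp_mutex; each table takes it
+	 * itself to serialize expiration against flow-table changes. */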
+ mutex_lock(&dp_mutex);
list_for_each_entry_rcu (flow, &td->flows, node) {
/* xxx Retrieve the packet count associated with this entry
* xxx and store it in "packet_count".
table_dummy_sfw_destroy(sfw);
}
spin_unlock_bh(&pending_free_lock);
+ mutex_unlock(&dp_mutex);
if (del_count)
atomic_sub(del_count, &td->n_flows);
unsigned int i;
int count = 0;
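+	/* Expiration runs under dp_mutex; chain_timeout() itself is lockless. */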
+ mutex_lock(&dp_mutex);
for (i = 0; i <= th->bucket_mask; i++) {
struct sw_flow **bucket = &th->buckets[i];
struct sw_flow *flow = *bucket;
dp_send_flow_expired(dp, flow);
}
}
+ mutex_unlock(&dp_mutex);
if (count)
atomic_sub(count, &th->n_flows);
struct sw_flow *flow;
int count = 0;
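+	/* As in the other tables, expiration takes dp_mutex itself. */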
+ mutex_lock(&dp_mutex);
list_for_each_entry_rcu (flow, &tl->flows, node) {
if (flow_timeout(flow)) {
count += do_delete(swt, flow);
dp_send_flow_expired(dp, flow);
}
}
+ mutex_unlock(&dp_mutex);
+
if (count)
atomic_sub(count, &tl->n_flows);
return count;