From: Ben Pfaff
Date: Mon, 21 Jul 2008 20:59:10 +0000 (-0700)
Subject: Simplify use of dp_mutex.
X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=47b8652d;p=openvswitch

Simplify use of dp_mutex.

There was little point in taking the dp_mutex farther down in the code than
dp_genl_openflow, since that function is already completely serialized by
genl_rcv across the genl_mutex. We could get rid of dp_mutex completely,
except that we still need it to serialize timeout.
---

diff --git a/datapath/chain.c b/datapath/chain.c
index fd13a868..f7ea1dc6 100644
--- a/datapath/chain.c
+++ b/datapath/chain.c
@@ -62,8 +62,7 @@ error:
 /* Searches 'chain' for a flow matching 'key', which must not have any wildcard
  * fields. Returns the flow if successful, otherwise a null pointer.
  *
- * Caller must hold rcu_read_lock, and not release it until it is done with the
- * returned flow. */
+ * Caller must hold rcu_read_lock or dp_mutex. */
 struct sw_flow *chain_lookup(struct sw_chain *chain,
                              const struct sw_flow_key *key)
 {
@@ -85,8 +84,7 @@ struct sw_flow *chain_lookup(struct sw_chain *chain,
  * If successful, 'flow' becomes owned by the chain, otherwise it is retained
  * by the caller.
  *
- * Caller must hold rcu_read_lock. If insertion is successful, it must not
- * release rcu_read_lock until it is done with the inserted flow. */
+ * Caller must hold dp_mutex. */
 int chain_insert(struct sw_chain *chain, struct sw_flow *flow)
 {
         int i;
@@ -107,7 +105,7 @@ int chain_insert(struct sw_chain *chain, struct sw_flow *flow)
  * iterating through the entire contents of each table for keys that contain
  * wildcards. Relatively cheap for fully specified keys.
  *
- * The caller need not hold any locks. */
+ * Caller must hold dp_mutex. */
 int chain_delete(struct sw_chain *chain, const struct sw_flow_key *key,
                  uint16_t priority, int strict)
 {
@@ -116,9 +114,7 @@ int chain_delete(struct sw_chain *chain, const struct sw_flow_key *key,
 
         for (i = 0; i < chain->n_tables; i++) {
                 struct sw_table *t = chain->tables[i];
-                rcu_read_lock();
                 count += t->delete(t, key, priority, strict);
-                rcu_read_unlock();
         }
 
         return count;
@@ -130,7 +126,8 @@ int chain_delete(struct sw_chain *chain, const struct sw_flow_key *key,
  * Expensive as currently implemented, since it iterates through the entire
  * contents of each table.
  *
- * The caller need not hold any locks. */
+ * Caller must not hold dp_mutex, because individual tables take and release it
+ * as necessary. */
 int chain_timeout(struct sw_chain *chain)
 {
         int count = 0;
diff --git a/datapath/datapath.c b/datapath/datapath.c
index f81388d6..d55ac2ea 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -63,15 +63,16 @@ static struct genl_multicast_group mc_group;
 /* It's hard to imagine wanting more than one datapath, but... */
 #define DP_MAX 32
 
-/* datapaths. Protected on the read side by rcu_read_lock, on the write side
- * by dp_mutex.
+/* Datapaths. Protected on the read side by rcu_read_lock, on the write side
+ * by dp_mutex. dp_mutex is almost completely redundant with genl_mutex
+ * maintained by the Generic Netlink code, but the timeout path needs mutual
+ * exclusion too.
  *
  * It is safe to access the datapath and net_bridge_port structures with just
- * the dp_mutex, but to access the chain you need to take the rcu_read_lock
- * also (because dp_mutex doesn't prevent flows from being destroyed).
+ * dp_mutex.
  */
 static struct datapath *dps[DP_MAX];
-static DEFINE_MUTEX(dp_mutex);
+DEFINE_MUTEX(dp_mutex);
 
 static int dp_maint_func(void *data);
 static int send_port_status(struct net_bridge_port *p, uint8_t status);
@@ -241,9 +242,7 @@ uint64_t gen_datapath_id(uint16_t dp_idx)
 }
 
 /* Creates a new datapath numbered 'dp_idx'. Returns 0 for success or a
- * negative error code.
- *
- * Not called with any locks. */
+ * negative error code. */
 static int new_dp(int dp_idx)
 {
         struct datapath *dp;
@@ -255,9 +254,8 @@ static int new_dp(int dp_idx)
         if (!try_module_get(THIS_MODULE))
                 return -ENODEV;
 
-        mutex_lock(&dp_mutex);
-        dp = rcu_dereference(dps[dp_idx]);
-        if (dp != NULL) {
+        /* Exit early if a datapath with that number already exists. */
+        if (dps[dp_idx]) {
                 err = -EEXIST;
                 goto err_unlock;
         }
@@ -292,8 +290,7 @@ static int new_dp(int dp_idx)
         if (IS_ERR(dp->dp_task))
                 goto err_destroy_chain;
 
-        rcu_assign_pointer(dps[dp_idx], dp);
-        mutex_unlock(&dp_mutex);
+        dps[dp_idx] = dp;
 
         return 0;
 
@@ -306,12 +303,11 @@ err_destroy_dp_dev:
 err_free_dp:
         kfree(dp);
 err_unlock:
-        mutex_unlock(&dp_mutex);
         module_put(THIS_MODULE);
         return err;
 }
 
-/* Find and return a free port number under 'dp'. Called under dp_mutex. */
+/* Find and return a free port number under 'dp'. */
 static int find_portno(struct datapath *dp)
 {
         int i;
@@ -349,7 +345,6 @@ static struct net_bridge_port *new_nbp(struct datapath *dp,
         return p;
 }
 
-/* Called with dp_mutex. */
 int add_switch_port(struct datapath *dp, struct net_device *dev)
 {
         struct net_bridge_port *p;
@@ -373,8 +368,7 @@ int add_switch_port(struct datapath *dp, struct net_device *dev)
         return 0;
 }
 
-/* Delete 'p' from switch.
- * Called with dp_mutex. */
+/* Delete 'p' from switch. */
 static int del_switch_port(struct net_bridge_port *p)
 {
         /* First drop references to device. */
@@ -398,7 +392,6 @@ static int del_switch_port(struct net_bridge_port *p)
         return 0;
 }
 
-/* Called with dp_mutex. */
 static void del_dp(struct datapath *dp)
 {
         struct net_bridge_port *p, *n;
@@ -466,8 +459,6 @@ static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
 }
 #else
 /* NB: This has only been tested on 2.4.35 */
-
-/* Called without any locks (?) */
 static void dp_frame_hook(struct sk_buff *skb)
 {
         struct net_bridge_port *p = skb->dev->br_port;
@@ -881,7 +872,6 @@ static int dp_genl_del(struct sk_buff *skb, struct genl_info *info)
         if (!info->attrs[DP_GENL_A_DP_IDX])
                 return -EINVAL;
 
-        mutex_lock(&dp_mutex);
         dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));
         if (!dp)
                 err = -ENOENT;
@@ -889,7 +879,6 @@ static int dp_genl_del(struct sk_buff *skb, struct genl_info *info)
                 del_dp(dp);
                 err = 0;
         }
-        mutex_unlock(&dp_mutex);
         return err;
 }
 
@@ -973,7 +962,6 @@ static int dp_genl_add_del_port(struct sk_buff *skb, struct genl_info *info)
                 return -EINVAL;
 
         /* Get datapath. */
-        mutex_lock(&dp_mutex);
         dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
         if (!dp) {
                 err = -ENOENT;
                 goto out;
         }
@@ -1002,7 +990,6 @@ static int dp_genl_add_del_port(struct sk_buff *skb, struct genl_info *info)
 out_put:
         dev_put(port);
 out:
-        mutex_unlock(&dp_mutex);
         return err;
 }
 
@@ -1028,6 +1015,7 @@ static int dp_genl_openflow(struct sk_buff *skb, struct genl_info *info)
         struct datapath *dp;
         struct ofp_header *oh;
         struct sender sender;
+        int err;
 
         if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
                 return -EINVAL;
@@ -1043,8 +1031,12 @@ static int dp_genl_openflow(struct sk_buff *skb, struct genl_info *info)
         sender.xid = oh->xid;
         sender.pid = info->snd_pid;
         sender.seq = info->snd_seq;
-        return fwd_control_input(dp->chain, &sender,
-                                 nla_data(va), nla_len(va));
+
+        mutex_lock(&dp_mutex);
+        err = fwd_control_input(dp->chain, &sender,
+                                nla_data(va), nla_len(va));
+        mutex_unlock(&dp_mutex);
+        return err;
 }
 
 static struct nla_policy dp_genl_openflow_policy[DP_GENL_A_MAX + 1] = {
diff --git a/datapath/datapath.h b/datapath/datapath.h
index bb714d35..aa313e81 100644
--- a/datapath/datapath.h
+++ b/datapath/datapath.h
@@ -3,6 +3,7 @@
 #ifndef DATAPATH_H
 #define DATAPATH_H 1
 
+#include <linux/mutex.h>
 #include
 #include
 #include
@@ -67,6 +68,8 @@ struct sender {
         uint32_t seq;           /* Netlink sequence ID of request. */
 };
 
+extern struct mutex dp_mutex;
+
 int dp_output_port(struct datapath *, struct sk_buff *, int out_port);
 int dp_output_control(struct datapath *, struct sk_buff *, uint32_t,
                       size_t, int);
diff --git a/datapath/hwtable_dummy/hwtable_dummy.c b/datapath/hwtable_dummy/hwtable_dummy.c
index d84dde0c..449b94bf 100644
--- a/datapath/hwtable_dummy/hwtable_dummy.c
+++ b/datapath/hwtable_dummy/hwtable_dummy.c
@@ -167,6 +167,7 @@ static int table_dummy_timeout(struct datapath *dp, struct sw_table *swt)
         uint64_t packet_count = 0;
         int i = 0;
 
+        mutex_lock(&dp_mutex);
         list_for_each_entry_rcu (flow, &td->flows, node) {
                 /* xxx Retrieve the packet count associated with this entry
                  * xxx and store it in "packet_count".
@@ -199,6 +200,7 @@ static int table_dummy_timeout(struct datapath *dp, struct sw_table *swt)
                 table_dummy_sfw_destroy(sfw);
         }
         spin_unlock_bh(&pending_free_lock);
+        mutex_unlock(&dp_mutex);
 
         if (del_count)
                 atomic_sub(del_count, &td->n_flows);
diff --git a/datapath/table-hash.c b/datapath/table-hash.c
index 9c8b532f..c972fbcc 100644
--- a/datapath/table-hash.c
+++ b/datapath/table-hash.c
@@ -121,6 +121,7 @@ static int table_hash_timeout(struct datapath *dp, struct sw_table *swt)
         unsigned int i;
         int count = 0;
 
+        mutex_lock(&dp_mutex);
         for (i = 0; i <= th->bucket_mask; i++) {
                 struct sw_flow **bucket = &th->buckets[i];
                 struct sw_flow *flow = *bucket;
@@ -130,6 +131,7 @@ static int table_hash_timeout(struct datapath *dp, struct sw_table *swt)
                         dp_send_flow_expired(dp, flow);
                 }
         }
+        mutex_unlock(&dp_mutex);
 
         if (count)
                 atomic_sub(count, &th->n_flows);
diff --git a/datapath/table-linear.c b/datapath/table-linear.c
index 68c3aed1..04587e5f 100644
--- a/datapath/table-linear.c
+++ b/datapath/table-linear.c
@@ -113,6 +113,7 @@ static int table_linear_timeout(struct datapath *dp, struct sw_table *swt)
         struct sw_flow *flow;
         int count = 0;
 
+        mutex_lock(&dp_mutex);
         list_for_each_entry_rcu (flow, &tl->flows, node) {
                 if (flow_timeout(flow)) {
                         count += do_delete(swt, flow);
@@ -120,6 +121,8 @@ static int table_linear_timeout(struct datapath *dp, struct sw_table *swt)
                         dp_send_flow_expired(dp, flow);
                 }
         }
+        mutex_unlock(&dp_mutex);
+
         if (count)
                 atomic_sub(count, &tl->n_flows);
         return count;
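
The locking rule this patch arrives at is: every control-path request funnels
through dp_genl_openflow, which is already serialized by genl_rcv under
genl_mutex and now also takes dp_mutex around fwd_control_input, while each
table's timeout routine takes dp_mutex itself before walking and expiring
flows. The userspace sketch below only models that discipline with pthreads;
it is not kernel code from this patch, and control_thread, timeout_thread,
and n_flows are hypothetical stand-ins for fwd_control_input and the
table_*_timeout functions.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for dp_mutex: one mutex serializes the control path
 * (flow add/delete) against the periodic expiration path. */
static pthread_mutex_t dp_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical flow count; the real code walks sw_table implementations. */
static int n_flows;

/* Models dp_genl_openflow -> fwd_control_input: the Generic Netlink
 * handler already runs serialized against other requests, but it still
 * takes dp_mutex so it cannot run concurrently with the timeout path. */
static void *control_thread(void *arg)
{
        for (int i = 0; i < 5; i++) {
                pthread_mutex_lock(&dp_mutex);
                n_flows++;                      /* e.g. insert a flow */
                printf("control: %d flows\n", n_flows);
                pthread_mutex_unlock(&dp_mutex);
                usleep(1000);
        }
        return NULL;
}

/* Models table_*_timeout: chain_timeout is called without dp_mutex held,
 * so each table takes and releases it around its own expiration walk. */
static void *timeout_thread(void *arg)
{
        for (int i = 0; i < 5; i++) {
                pthread_mutex_lock(&dp_mutex);
                if (n_flows > 0)
                        n_flows--;              /* e.g. expire a flow */
                printf("timeout: %d flows\n", n_flows);
                pthread_mutex_unlock(&dp_mutex);
                usleep(1000);
        }
        return NULL;
}

int main(void)
{
        pthread_t ctl, tmo;
        pthread_create(&ctl, NULL, control_thread, NULL);
        pthread_create(&tmo, NULL, timeout_thread, NULL);
        pthread_join(ctl, NULL);
        pthread_join(tmo, NULL);
        return 0;
}

Built with "cc -pthread", the two loops interleave but never observe a
half-updated flow count, which is the property the datapath needs between
the control path and the table timeout paths once RCU is no longer relied
on for writers.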