* It is safe to access the datapath and vport structures with just
* dp_mutex.
*/
-static struct datapath *dps[ODP_MAX];
+static struct datapath __rcu *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
static int new_vport(struct datapath *, struct odp_port *, int port_no);
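With the array elements annotated __rcu, sparse expects every access to go
through the RCU accessors: rcu_dereference() on the read side, and
rcu_dereference_protected() or rcu_assign_pointer() on the update side where
dp_mutex is held. A minimal sketch of the two access paths the comment above
describes (the helper names are illustrative, not part of this patch):

	/* Read side: caller holds rcu_read_lock(). */
	static struct datapath *get_dp_rcu(int dp_idx)
	{
		if (dp_idx < 0 || dp_idx >= ODP_MAX)
			return NULL;
		return rcu_dereference(dps[dp_idx]);
	}

	/* Update side: dp_mutex alone is sufficient, no RCU read lock needed. */
	static struct datapath *get_dp_locked(int dp_idx)
	{
		if (dp_idx < 0 || dp_idx >= ODP_MAX)
			return NULL;
		return rcu_dereference_protected(dps[dp_idx],
						 lockdep_is_held(&dp_mutex));
	}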
* @queues: %DP_N_QUEUES sets of queued packets for userspace to handle.
* @waitqueue: Waitqueue, for waiting for new packets in @queues.
* @n_flows: Number of flows currently in flow table.
- * @table: Current flow table (RCU protected).
+ * @table: Current flow table.
* @n_ports: Number of ports currently in @ports.
* @ports: Map from port number to &struct vport. %ODPP_LOCAL port
* always exists, other ports may be %NULL.
wait_queue_head_t waitqueue;
/* Flow table. */
- struct tbl *table;
+ struct tbl __rcu *table;
/* Switch ports. */
unsigned int n_ports;
- struct vport *ports[DP_MAX_PORTS];
+ struct vport __rcu *ports[DP_MAX_PORTS];
struct list_head port_list;
/* Stats. */
struct tbl_node tbl_node;
struct odp_flow_key key;
- struct sw_flow_actions *sf_acts;
+ struct sw_flow_actions __rcu *sf_acts;
atomic_t refcnt;
bool dead;
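Because sf_acts can be swapped out while packets are still executing the old
actions under rcu_read_lock(), replacing a flow's actions follows the usual
publish-then-defer-free pattern. A hedged sketch, assuming struct
sw_flow_actions embeds a struct rcu_head named rcu and that updaters are
serialized by dp_mutex (both assumptions, not shown in this hunk):

	static void free_acts_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct sw_flow_actions, rcu));
	}

	static void flow_replace_actions(struct sw_flow *flow,
					 struct sw_flow_actions *new_acts)
	{
		struct sw_flow_actions *old;

		old = rcu_dereference_protected(flow->sf_acts,
						lockdep_is_held(&dp_mutex));
		rcu_assign_pointer(flow->sf_acts, new_acts);
		if (old)
			call_rcu(&old->rcu, free_acts_rcu);
	}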
call_rcu(&table->rcu, destroy_table_rcu);
}
-static struct tbl_bucket **find_bucket(struct tbl *table, u32 hash)
+static struct tbl_bucket __rcu **find_bucket(struct tbl *table, u32 hash)
{
unsigned int l1 = (hash & (table->n_buckets - 1)) >> TBL_L1_SHIFT;
unsigned int l2 = hash & ((1 << TBL_L2_BITS) - 1);
struct tbl_node *tbl_lookup(struct tbl *table, void *target, u32 hash,
int (*cmp)(const struct tbl_node *, void *))
{
- struct tbl_bucket **bucketp = find_bucket(table, hash);
+ struct tbl_bucket __rcu **bucketp = find_bucket(table, hash);
struct tbl_bucket *bucket = rcu_dereference(*bucketp);
int index;
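tbl_lookup() only uses rcu_dereference() internally, so providing the
read-side critical section is the caller's job. An illustrative caller;
flow_cmp() is a stand-in comparison callback, not taken from this patch:

	/* Caller must hold rcu_read_lock(); the result is only valid inside
	 * that read-side critical section. */
	static struct sw_flow *flow_lookup(struct datapath *dp,
					   struct odp_flow_key *key, u32 hash)
	{
		struct tbl_node *node;

		node = tbl_lookup(rcu_dereference(dp->table), key, hash,
				  flow_cmp);
		return node ? container_of(node, struct sw_flow, tbl_node)
			    : NULL;
	}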
{
unsigned int i, j, k;
for (i = 0; i < table->n_buckets >> TBL_L1_BITS; i++) {
- struct tbl_bucket **l2 = table->buckets[i];
+ struct tbl_bucket __rcu **l2 = table->buckets[i];
for (j = 0; j < TBL_L1_SIZE; j++) {
struct tbl_bucket *bucket = rcu_dereference(l2[j]);
if (!bucket)
*/
int tbl_insert(struct tbl *table, struct tbl_node *target, u32 hash)
{
- struct tbl_bucket **oldp = find_bucket(table, hash);
+ struct tbl_bucket __rcu **oldp = find_bucket(table, hash);
struct tbl_bucket *old = rcu_dereference(*oldp);
unsigned int n = old ? old->n_objs : 0;
struct tbl_bucket *new = bucket_alloc(n + 1);
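find_bucket() returns a pointer to the __rcu slot rather than to the bucket
itself because tbl_insert() and tbl_remove() publish a replacement bucket into
that slot. Roughly, the tail of the insert path looks like the sketch below,
assuming struct tbl_bucket embeds a struct rcu_head named rcu and is
kmalloc()ed (the real reclamation path may differ):

	static void free_bucket_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct tbl_bucket, rcu));
	}

	/* Publish @new into the slot and reclaim @old after a grace period. */
	static void replace_bucket(struct tbl_bucket __rcu **slot,
				   struct tbl_bucket *old,
				   struct tbl_bucket *new)
	{
		rcu_assign_pointer(*slot, new);
		if (old)
			call_rcu(&old->rcu, free_bucket_rcu);
	}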
*/
int tbl_remove(struct tbl *table, struct tbl_node *target)
{
- struct tbl_bucket **oldp = find_bucket(table, target->hash);
+ struct tbl_bucket __rcu **oldp = find_bucket(table, target->hash);
struct tbl_bucket *old = rcu_dereference(*oldp);
unsigned int n = old->n_objs;
struct tbl_bucket *new;
struct tbl {
struct rcu_head rcu;
unsigned int n_buckets;
- struct tbl_bucket ***buckets;
+ struct tbl_bucket __rcu ***buckets;
unsigned int count;
void (*obj_destructor)(struct tbl_node *);
};
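Note where the qualifier sits in the declaration above: only the innermost,
per-slot bucket pointers are RCU-managed, since those are what tbl_insert()
and tbl_remove() swap with rcu_assign_pointer(); the index arrays appear to
stay fixed for the lifetime of a given table (expansion builds a new table and
publishes it as the datapath's table pointer). The resulting types then line
up with find_bucket() and the iteration code:

	struct tbl_bucket __rcu ***buckets;	/* table->buckets         */
	struct tbl_bucket __rcu **l2;		/* table->buckets[l1]     */
	struct tbl_bucket __rcu *slot;		/* table->buckets[l1][l2] */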
#define CACHE_DATA_ALIGN 16
-/* Protected by RCU. */
-static struct tbl *port_table __read_mostly;
+static struct tbl __rcu *port_table __read_mostly;
static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
char name[IFNAMSIZ];
const struct tnl_ops *tnl_ops;
- struct tnl_mutable_config *mutable; /* Protected by RCU. */
+ struct tnl_mutable_config __rcu *mutable;
/*
* ID of last fragment sent (for tunnel protocols with direct support
atomic_t frag_id;
spinlock_t cache_lock;
- struct tnl_cache *cache; /* Protected by RCU/cache_lock. */
+ struct tnl_cache __rcu *cache; /* Protected by RCU/cache_lock. */
#ifdef NEED_CACHE_TIMEOUT
/*
char peer_name[IFNAMSIZ];
struct hlist_node hash_node;
- /* Protected by RCU. */
- struct vport *peer;
-
- /* Protected by RCU. */
- struct device_config *devconf;
+ struct vport __rcu *peer;
+ struct device_config __rcu *devconf;
};
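Assuming updates to peer and devconf happen only with RTNL held, as the
neighbouring locking comment below suggests for other fields, updates can use
rcu_assign_pointer() directly and RTNL-holding readers can use
rcu_dereference_rtnl(), while fast-path readers still need rcu_read_lock().
A hedged sketch; struct patch_vport and the helper names are stand-ins for the
private structure these fields live in:

	static void patch_set_peer(struct patch_vport *patch_vport,
				   struct vport *new_peer)
	{
		ASSERT_RTNL();
		rcu_assign_pointer(patch_vport->peer, new_peer);
	}

	/* Safe with either rcu_read_lock() or RTNL held. */
	static struct vport *patch_get_peer(struct patch_vport *patch_vport)
	{
		return rcu_dereference_rtnl(patch_vport->peer);
	}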
/* Protected by RTNL lock. */