/*
 * NOTE(review): this span is a unified-diff hunk, not plain C; the '-' and
 * '+' lines are the before/after of the patch.
 *
 * Hash a tunnel-port lookup key via jhash2.  The patch switches the word
 * count from sizeof(*key) to PORT_KEY_LEN — presumably because only the
 * first PORT_KEY_LEN bytes of the struct participate in lookup (the
 * matching memcmp below is patched the same way); TODO confirm that
 * PORT_KEY_LEN is a multiple of sizeof(u32), as jhash2 takes a u32 count.
 */
static u32 port_hash(const struct port_lookup_key *key)
{
- return jhash2((u32*)key, (sizeof(*key) / sizeof(u32)), 0);
+ return jhash2((u32*)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}
static inline struct hlist_head *find_bucket(u32 hash)
struct tnl_mutable_config *mutable;
mutable = rcu_dereference_rtnl(tnl_vport->mutable);
- if (!memcmp(&mutable->key, key, sizeof(*key))) {
+ if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
*pmutable = mutable;
return tnl_vport;
}
cache->hh_seq = hh_seq;
#else
- read_lock_bh(&rt_dst(rt).hh->hh_lock);
+ read_lock(&rt_dst(rt).hh->hh_lock);
memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
- read_unlock_bh(&rt_dst(rt).hh->hh_lock);
+ read_unlock(&rt_dst(rt).hh->hh_lock);
#endif
}
* If lock is contended fall back to directly building the header.
* We're not going to help performance by sitting here spinning.
*/
- if (!spin_trylock_bh(&tnl_vport->cache_lock))
+ if (!spin_trylock(&tnl_vport->cache_lock))
return NULL;
cache = cache_dereference(tnl_vport);
assign_cache_rcu(vport, cache);
unlock:
- spin_unlock_bh(&tnl_vport->cache_lock);
+ spin_unlock(&tnl_vport->cache_lock);
return cache;
}