diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 84a78e396a566c96efb723eb3a35598a73a468f8..bc2d0d80d1f91e73a16960dfb92efcd688db2e35 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -585,6 +585,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 	struct rhash_head *he;
 	spinlock_t *lock;
 	unsigned int hash;
+	bool ret = false;
 
 	rcu_read_lock();
 	tbl = rht_dereference_rcu(ht->tbl, ht);
@@ -602,17 +603,16 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 		}
 
 		rcu_assign_pointer(*pprev, obj->next);
-		atomic_dec(&ht->nelems);
-
-		spin_unlock_bh(lock);
-
-		rhashtable_wakeup_worker(ht);
-
-		rcu_read_unlock();
 
-		return true;
+		ret = true;
+		break;
 	}
 
+	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
+	 * 'future_tbl' only exists for a short period of time during
+	 * resizing. Thus traversing both is fine and the added cost is
+	 * very rare.
+	 */
 	if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
 		spin_unlock_bh(lock);
 
@@ -625,9 +625,15 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 	}
 
 	spin_unlock_bh(lock);
+
+	if (ret) {
+		atomic_dec(&ht->nelems);
+		rhashtable_wakeup_worker(ht);
+	}
+
 	rcu_read_unlock();
 
-	return false;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rhashtable_remove);
 
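
For context, the standalone sketch below is not part of the patch; it only models the control flow the hunks above produce in rhashtable_remove(): unlink the object from whichever bucket list holds it, also search the future table while a resize may be in flight, and perform the element-count decrement and worker wakeup exactly once, after both searches, and only if the object was actually found. The names here (struct node, unlink_from, remove_obj, nelems) are simplified stand-ins invented for illustration; there is no RCU, hashing, or per-bucket locking in this sketch.

/* Minimal user-space sketch of the post-patch removal flow. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	int key;
	struct node *next;
};

/* Unlink 'obj' from the singly linked list rooted at *head, if present. */
static bool unlink_from(struct node **head, struct node *obj)
{
	struct node **pprev = head;
	struct node *he;

	for (he = *head; he != NULL; he = he->next) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}
		*pprev = obj->next;	/* splice the object out */
		return true;
	}
	return false;
}

/* Mirrors the shape of the patched function: the object may be linked in
 * the current table, the future table, or both, so both are traversed, and
 * the shared bookkeeping runs once at the end, only on success.
 */
static bool remove_obj(struct node **tbl, struct node **future_tbl,
		       struct node *obj, int *nelems)
{
	bool ret = unlink_from(tbl, obj);

	/* Search the second table even when the first search succeeded;
	 * pass the same head twice to model "no resize in progress". */
	if (future_tbl != tbl) {
		if (unlink_from(future_tbl, obj))
			ret = true;
	}

	if (ret)
		(*nelems)--;	/* stand-in for atomic_dec() + worker wakeup */

	return ret;
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL };
	struct node *tbl = &a, *future_tbl = &b;
	int nelems = 2;

	/* Simulate a resize in flight: 'a' still hangs off the old table,
	 * 'b' has already been relocated to the future table. */
	printf("remove a: %d\n", remove_obj(&tbl, &future_tbl, &a, &nelems));
	printf("remove b: %d\n", remove_obj(&tbl, &future_tbl, &b, &nelems));
	printf("remove b again: %d\n",
	       remove_obj(&tbl, &future_tbl, &b, &nelems));
	printf("nelems = %d\n", nelems);
	return 0;
}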