author	Ying Xue <ying.xue@windriver.com>	2015-01-16 11:13:09 +0800
committer	David S. Miller <davem@davemloft.net>	2015-01-16 01:18:51 -0500
commit	57699a40b4f2694d3ee63fd5e6465ec8f600b620 (patch)
tree	b3523af9d1685de66d9cba887007803bd2a1f467 /lib
parent	Merge branch 'iw_cxgb4-next' (diff)
rhashtable: Fix race in rhashtable_destroy() and use regular work_struct
When we put our declared work task on the global workqueue with schedule_delayed_work(), its delay parameter is always zero. Therefore, we should define a regular work item in the rhashtable structure instead of a delayed work item.

We also add a check that the resizing functions are non-NULL before cancelling the work, to avoid cancelling an uninitialized work item.

Lastly, rhashtable_destroy() takes ht->mutex and then waits, via cancel_delayed_work(), for all previously submitted work items to run to completion. When a scheduled work item runs, its handler, rht_deferred_worker(), also needs to acquire ht->mutex, so a deadlock can occur because the lock is already held. Moving the cancel call out of the locked region avoids the deadlock.

Fixes: 97defe1 ("rhashtable: Per bucket locks & deferred expansion/shrinking")
Signed-off-by: Ying Xue <ying.xue@windriver.com>
Cc: Thomas Graf <tgraf@suug.ch>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
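To make the deadlock concrete, here is a minimal, hypothetical userspace analogue using POSIX threads (not kernel code; table_mutex, worker() and destroy_good() are made-up names): waiting for a worker while holding the lock that the worker itself must take can never finish, whereas waiting first and locking afterwards, as the patch does by calling cancel_work_sync() outside ht->mutex, is safe.

#include <pthread.h>
#include <stdio.h>

/*
 * Illustrative userspace analogue only -- table_mutex, worker() and
 * destroy_good() are made-up names, not kernel or rhashtable APIs.
 */
static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&table_mutex);	/* like rht_deferred_worker() taking ht->mutex */
	/* ... resize the table ... */
	pthread_mutex_unlock(&table_mutex);
	return NULL;
}

static void destroy_good(pthread_t w)
{
	pthread_join(w, NULL);			/* wait for the worker first (cf. cancel_work_sync()) */
	pthread_mutex_lock(&table_mutex);	/* only then take the lock to free the table */
	/* ... free the bucket table ... */
	pthread_mutex_unlock(&table_mutex);
}

int main(void)
{
	pthread_t w;

	pthread_create(&w, NULL, worker, NULL);
	/*
	 * The buggy ordering would lock table_mutex and then wait for the
	 * worker; if the worker has not taken the mutex yet, it blocks on the
	 * lock forever and the wait never returns -- the deadlock described
	 * in the commit message.
	 */
	destroy_good(w);
	printf("destroyed without deadlock\n");
	return 0;
}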
Diffstat (limited to 'lib')
-rw-r--r--	lib/rhashtable.c	| 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index aca699813ba9..84a78e396a56 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -485,7 +485,7 @@ static void rht_deferred_worker(struct work_struct *work)
struct rhashtable *ht;
struct bucket_table *tbl;
- ht = container_of(work, struct rhashtable, run_work.work);
+ ht = container_of(work, struct rhashtable, run_work);
mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
@@ -507,7 +507,7 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
if (tbl == new_tbl &&
((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
(ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
- schedule_delayed_work(&ht->run_work, 0);
+ schedule_work(&ht->run_work);
}
static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
@@ -903,7 +903,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
if (ht->p.grow_decision || ht->p.shrink_decision)
- INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker);
+ INIT_WORK(&ht->run_work, rht_deferred_worker);
return 0;
}
@@ -921,11 +921,11 @@ void rhashtable_destroy(struct rhashtable *ht)
{
ht->being_destroyed = true;
- mutex_lock(&ht->mutex);
+ if (ht->p.grow_decision || ht->p.shrink_decision)
+ cancel_work_sync(&ht->run_work);
- cancel_delayed_work(&ht->run_work);
+ mutex_lock(&ht->mutex);
bucket_table_free(rht_dereference(ht->tbl, ht));
-
mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
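For reference, reconstructed from the hunk above, rhashtable_destroy() reads as follows once the patch is applied: the work item is cancelled (only if it was ever initialized) before ht->mutex is taken, so rht_deferred_worker() is free to acquire the lock and complete.

void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	/* Only cancel work that rhashtable_init() actually set up. */
	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);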