author     Ying Xue <ying.xue@windriver.com>      2015-01-07 13:41:53 +0800
committer  David S. Miller <davem@davemloft.net>  2015-01-08 19:47:10 -0800
commit     54c5b7d311c8e1801f9dcce9f388a7420a25fa90 (patch)
tree       9bbd313b7a6676fb1752abf7dd3fe736c18114bc /lib/rhashtable.c
parent     rhashtable: optimize rhashtable_lookup routine (diff)
download   linux-dev-54c5b7d311c8e1801f9dcce9f388a7420a25fa90.tar.xz
           linux-dev-54c5b7d311c8e1801f9dcce9f388a7420a25fa90.zip
rhashtable: introduce rhashtable_wakeup_worker helper function
Introduce a rhashtable_wakeup_worker() helper function to reduce the code duplicated at the two places that wake up the worker.

In addition, the wakeup condition was inverted: as long as the "future_tbl" and "tbl" bucket table pointers point to the same bucket array, no resize is in progress and we should consider waking the resizing worker thread; if the pointers differ, the resize of the hash table is not finished yet. However, the worker thread was previously woken only when the two pointers pointed to different bucket arrays, which is obviously wrong. That issue is fixed in this patch as well.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Cc: Thomas Graf <tgraf@suug.ch>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to '')
-rw-r--r--  lib/rhashtable.c  23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index f2fdd7a7cb16..20006854fce0 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -492,6 +492,19 @@ static void rht_deferred_worker(struct work_struct *work)
 	mutex_unlock(&ht->mutex);
 }
 
+static void rhashtable_wakeup_worker(struct rhashtable *ht)
+{
+	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	size_t size = tbl->size;
+
+	/* Only adjust the table if no resizing is currently in progress. */
+	if (tbl == new_tbl &&
+	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
+	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
+		schedule_delayed_work(&ht->run_work, 0);
+}
+
 /**
  * rhashtable_insert - insert object into hash hash table
  * @ht:		hash table
@@ -532,10 +545,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 
 	atomic_inc(&ht->nelems);
 
-	/* Only grow the table if no resizing is currently in progress. */
-	if (ht->tbl != ht->future_tbl &&
-	    ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
-		schedule_delayed_work(&ht->run_work, 0);
+	rhashtable_wakeup_worker(ht);
 
 	rcu_read_unlock();
 }
@@ -584,10 +594,7 @@ restart:
 
 		spin_unlock_bh(lock);
 
-		if (ht->tbl != ht->future_tbl &&
-		    ht->p.shrink_decision &&
-		    ht->p.shrink_decision(ht, tbl->size))
-			schedule_delayed_work(&ht->run_work, 0);
+		rhashtable_wakeup_worker(ht);
 
 		rcu_read_unlock();
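
To illustrate the condition the commit message describes, here is a minimal, self-contained user-space C sketch. It is not the kernel implementation: struct toy_ht, grow_if_overloaded() and printf() are simplified stand-ins for struct rhashtable, the real grow/shrink decision callbacks and schedule_delayed_work(). It models only the corrected check: the worker may be scheduled when tbl and future_tbl point to the same bucket array, i.e. when no resize is already in flight.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the rhashtable structures (illustration only). */
struct bucket_table {
	unsigned int size;
};

struct toy_ht {
	struct bucket_table *tbl;        /* table readers currently use */
	struct bucket_table *future_tbl; /* differs from tbl only while a resize runs */
	unsigned int nelems;
	bool (*grow_decision)(const struct toy_ht *ht, unsigned int size);
	bool (*shrink_decision)(const struct toy_ht *ht, unsigned int size);
};

/* Toy policy: grow once the table holds more entries than buckets. */
static bool grow_if_overloaded(const struct toy_ht *ht, unsigned int size)
{
	return ht->nelems > size;
}

/*
 * Mirrors the shape of the new helper: wake the worker only when
 * tbl == future_tbl (no resize in progress) and a grow or shrink
 * policy asks for one.  printf() stands in for schedule_delayed_work().
 */
static void toy_wakeup_worker(struct toy_ht *ht)
{
	unsigned int size = ht->tbl->size;

	if (ht->tbl == ht->future_tbl &&
	    ((ht->grow_decision && ht->grow_decision(ht, size)) ||
	     (ht->shrink_decision && ht->shrink_decision(ht, size))))
		printf("wake worker: size=%u nelems=%u\n", size, ht->nelems);
	else
		printf("no wakeup: resize in flight or thresholds not met\n");
}

int main(void)
{
	struct bucket_table cur = { .size = 4 };
	struct bucket_table next = { .size = 8 };
	struct toy_ht ht = {
		.tbl = &cur,
		.future_tbl = &cur,   /* same array: no resize running */
		.nelems = 5,
		.grow_decision = grow_if_overloaded,
	};

	toy_wakeup_worker(&ht);   /* overloaded and idle -> wake worker */

	ht.future_tbl = &next;    /* simulate a resize already in progress */
	toy_wakeup_worker(&ht);   /* pointers differ -> do not wake again */

	return 0;
}

With the pre-patch check (ht->tbl != ht->future_tbl), the first call above would stay silent and an overloaded, idle table would never get resized; that inversion is the bug the new helper fixes.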