author:    Herbert Xu <herbert@gondor.apana.org.au>  2015-03-14 13:57:23 +1100
committer: David S. Miller <davem@davemloft.net>  2015-03-15 01:35:34 -0400
commit:    9d901bc05153bbf33b5da2cd6266865e531f0545 (patch)
tree:      57518695b14d3f67e66c6e25ab2878f04158bf25 /include/linux/rhashtable.h
parent:    rhashtable: Move seed init into bucket_table_alloc (diff)
rhashtable: Free bucket tables asynchronously after rehash
There is in fact no need to wait for an RCU grace period in the rehash
function, since all insertions are guaranteed to go into the new table
through spin locks.

This patch uses call_rcu to free the old/rehashed table at our leisure.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/rhashtable.h')
-rw-r--r--  include/linux/rhashtable.h  2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 4192682c1d5c..a0abddd226b3 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -54,6 +54,7 @@ struct rhash_head {
* @locks_mask: Mask to apply before accessing locks[]
* @locks: Array of spinlocks protecting individual buckets
* @walkers: List of active walkers
+ * @rcu: RCU structure for freeing the table
* @buckets: size * hash buckets
*/
struct bucket_table {
@@ -63,6 +64,7 @@ struct bucket_table {
unsigned int locks_mask;
spinlock_t *locks;
struct list_head walkers;
+ struct rcu_head rcu;
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
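
The call_rcu wiring itself lives in lib/rhashtable.c and is outside this header-only diffstat. Below is a minimal sketch of how the new rcu field embedded in struct bucket_table is typically handed to call_rcu so the old table is freed only after a grace period; the bucket_table_free() helper shown here is illustrative and may differ from the actual implementation.

```c
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/mm.h>

/* Illustrative helper: release the table's memory (per-bucket lock array
 * plus the table itself).  The real free routine lives in lib/rhashtable.c. */
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);
	kvfree(tbl);
}

/* call_rcu callback: recover the enclosing bucket_table from its embedded
 * rcu_head and free it once all pre-existing RCU readers have finished. */
static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

/*
 * At the end of a rehash, instead of blocking in synchronize_rcu() before
 * freeing, the old table can be queued for deferred freeing:
 *
 *	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
 *
 * Readers still traversing the old table under rcu_read_lock() stay safe;
 * the memory is reclaimed only after the grace period elapses, while the
 * rehash path returns immediately.
 */
```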