author    Herbert Xu <herbert@gondor.apana.org.au>  2017-02-11 19:26:47 +0800
committer David S. Miller <davem@davemloft.net>     2017-02-17 12:28:35 -0500
commit    da20420f83ea0fbcf3d03afda08d971ea1d8a356 (patch)
tree      0a6a08124301510af4c6f6926efc9ae7afd8c03f /include/linux/rhashtable.h
parent    tipc: Fix tipc_sk_reinit race conditions (diff)
rhashtable: Add nested tables
This patch adds code that handles GFP_ATOMIC kmalloc failure on
insertion.

As we cannot use vmalloc, we solve it by making our hash table nested.
That is, we allocate single pages at each level and reach our desired
table size by nesting them.

When a nested table is created, only a single page is allocated at the
top-level.  Lower levels are allocated on demand during insertion.
Therefore for each insertion to succeed, only two (non-consecutive)
pages are needed.

After a nested table is created, a rehash will be scheduled in order to
switch to a vmalloced table as soon as possible.  Also, the rehash code
will never rehash into a nested table.  If we detect a nested table
during a rehash, the rehash will be aborted and a new rehash will be
scheduled.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
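[Editor's note] For orientation, here is a minimal standalone sketch of the
nesting idea described in the commit message.  It is not the kernel
implementation (the real code lives in lib/rhashtable.c and works with
RCU-protected, page-sized pointer levels); the sketch_* names and the
512-entry "page" size below are illustrative assumptions only.

/*
 * Standalone sketch of a two-level nested bucket array.  Each level is
 * one "page" worth of pointers; leaf pages are allocated lazily, so a
 * failed large allocation is replaced by small page-sized ones.
 * Names and sizes are illustrative, not the kernel's.
 */
#include <stdlib.h>

#define SKETCH_PAGE_PTRS 512u            /* pointers per page (assumed) */

struct sketch_head;                      /* stands in for rhash_head */

struct sketch_nested_tbl {
	/* Top level: one page of pointers to leaf pages. */
	struct sketch_head **level1[SKETCH_PAGE_PTRS];
};

/* Return the slot for @hash, allocating the leaf page on demand. */
static struct sketch_head **
sketch_bucket_insert(struct sketch_nested_tbl *tbl, unsigned int hash)
{
	unsigned int hi = (hash / SKETCH_PAGE_PTRS) % SKETCH_PAGE_PTRS;
	unsigned int lo = hash % SKETCH_PAGE_PTRS;

	if (!tbl->level1[hi]) {
		/* Lazy allocation: only a page-sized leaf is needed here. */
		tbl->level1[hi] = calloc(SKETCH_PAGE_PTRS,
					 sizeof(*tbl->level1[hi]));
		if (!tbl->level1[hi])
			return NULL;     /* caller maps this to -ENOMEM */
	}
	return &tbl->level1[hi][lo];
}

A caller that gets NULL back simply fails the insertion with -ENOMEM, much
as the insertion slow path in the diff below does after calling
rht_bucket_insert().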
Diffstat (limited to 'include/linux/rhashtable.h')
-rw-r--r--  include/linux/rhashtable.h  78
1 file changed, 56 insertions(+), 22 deletions(-)
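[Editor's note] The header changes below funnel every bucket access through
new helpers (rht_bucket(), rht_bucket_var(), rht_bucket_insert()) that fall
back to an out-of-line nested lookup when tbl->nest is non-zero.  The
following standalone fragment sketches that dispatch pattern; the demo_*
names and the trivial nested lookup are stand-ins, not the kernel's types.

/*
 * Sketch of the accessor pattern introduced below: callers no longer
 * index tbl->buckets[hash] directly but go through a helper that
 * dispatches on tbl->nest.  All demo_* names are illustrative.
 */
struct demo_head { struct demo_head *next; };

struct demo_table {
	unsigned int size;
	unsigned int nest;              /* 0: flat table, else nested     */
	struct demo_head **top_level;   /* nested case, as sketched above */
	struct demo_head *buckets[];    /* flat case: size chain heads    */
};

/* Placeholder for the out-of-line nested lookup (rht_bucket_nested()
 * in the real header); a full version would walk the nested levels. */
static struct demo_head **demo_bucket_nested(struct demo_table *tbl,
					     unsigned int hash)
{
	return &tbl->top_level[hash % tbl->size];
}

/* Flat tables keep the old single-array lookup; nested tables take the
 * out-of-line path.  This mirrors rht_bucket()/rht_bucket_var(). */
static struct demo_head **demo_bucket(struct demo_table *tbl,
				      unsigned int hash)
{
	return tbl->nest ? demo_bucket_nested(tbl, hash)
			 : &tbl->buckets[hash];
}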
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5c132d3188be..f2e12a845910 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -61,6 +61,7 @@ struct rhlist_head {
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
+ * @nest: Number of bits of first-level nested table.
* @rehash: Current bucket being rehashed
* @hash_rnd: Random seed to fold into hash
* @locks_mask: Mask to apply before accessing locks[]
@@ -68,10 +69,12 @@ struct rhlist_head {
* @walkers: List of active walkers
* @rcu: RCU structure for freeing the table
* @future_tbl: Table under construction during rehashing
+ * @ntbl: Nested table used when out of memory.
* @buckets: size * hash buckets
*/
struct bucket_table {
unsigned int size;
+ unsigned int nest;
unsigned int rehash;
u32 hash_rnd;
unsigned int locks_mask;
@@ -81,7 +84,7 @@ struct bucket_table {
struct bucket_table __rcu *future_tbl;
- struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+ struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/**
@@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void *arg);
void rhashtable_destroy(struct rhashtable *ht);
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+ unsigned int hash);
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash);
+
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht);
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
+static inline struct rhash_head __rcu *const *rht_bucket(
+ const struct bucket_table *tbl, unsigned int hash)
+{
+ return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+ &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_var(
+ struct bucket_table *tbl, unsigned int hash)
+{
+ return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+ &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_insert(
+ struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
+{
+ return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
+ &tbl->buckets[hash];
+}
+
/**
* rht_for_each_continue - continue iterating over hash chain
* @pos: the &struct rhash_head to use as a loop cursor.
@@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht);
* @hash: the hash value / bucket index
*/
#define rht_for_each(pos, tbl, hash) \
- rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+ rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
* rht_for_each_entry_continue - continue iterating over hash chain
@@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht);
* @member: name of the &struct rhash_head within the hashable struct.
*/
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
+ rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
tbl, hash, member)
/**
@@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht);
* This hash chain list-traversal primitive allows for the looped code to
* remove the loop cursor from the list.
*/
-#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
- for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
- next = !rht_is_a_nulls(pos) ? \
- rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
- (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
- pos = next, \
- next = !rht_is_a_nulls(pos) ? \
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
+ for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = next, \
+ next = !rht_is_a_nulls(pos) ? \
rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
@@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht);
* traversal is guarded by rcu_read_lock().
*/
#define rht_for_each_rcu(pos, tbl, hash) \
- rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+ rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
* rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht);
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
tbl, hash, member)
/**
@@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
- const struct bucket_table *tbl;
+ struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -697,8 +727,12 @@ slow_path:
}
elasticity = ht->elasticity;
- pprev = &tbl->buckets[hash];
- rht_for_each(head, tbl, hash) {
+ pprev = rht_bucket_insert(ht, tbl, hash);
+ data = ERR_PTR(-ENOMEM);
+ if (!pprev)
+ goto out;
+
+ rht_for_each_continue(head, *pprev, tbl, hash) {
struct rhlist_head *plist;
struct rhlist_head *list;
@@ -736,7 +770,7 @@ slow_path:
if (unlikely(rht_grow_above_100(ht, tbl)))
goto slow_path;
- head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+ head = rht_dereference_bucket(*pprev, tbl, hash);
RCU_INIT_POINTER(obj->next, head);
if (rhlist) {
@@ -746,7 +780,7 @@ slow_path:
RCU_INIT_POINTER(list->next, NULL);
}
- rcu_assign_pointer(tbl->buckets[hash], obj);
+ rcu_assign_pointer(*pprev, obj);
atomic_inc(&ht->nelems);
if (rht_grow_above_75(ht, tbl))
@@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one(
spin_lock_bh(lock);
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
+ pprev = rht_bucket_var(tbl, hash);
+ rht_for_each_continue(he, *pprev, tbl, hash) {
struct rhlist_head *list;
list = container_of(he, struct rhlist_head, rhead);
@@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast(
spin_lock_bh(lock);
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
+ pprev = rht_bucket_var(tbl, hash);
+ rht_for_each_continue(he, *pprev, tbl, hash) {
if (he != obj_old) {
pprev = &he->next;
continue;