author     Thomas Graf <tgraf@suug.ch>            2015-01-02 23:00:20 +0100
committer  David S. Miller <davem@davemloft.net>  2015-01-03 14:32:57 -0500
commit     97defe1ecf868b8127f8e62395499d6a06e4c4b1 (patch)
tree       d3ed6d3db4943e01b1ae58e73580537ba1642d9e /include
parent     spinlock: Add spin_lock_bh_nested() (diff)
rhashtable: Per bucket locks & deferred expansion/shrinking
Introduces an array of spinlocks to protect bucket mutations. The number of spinlocks per CPU is configurable and selected based on the hash of the bucket. This allows for parallel insertions and removals of entries which do not share a lock.

The patch also defers expansion and shrinking to a worker queue, which allows insertion and removal from atomic context. Insertions and deletions may occur in parallel to it and are only held up briefly while the particular bucket is linked or unzipped.

Mutations of the bucket table pointer are protected by a new mutex; read access is RCU protected.

In the event of an expansion or shrinking, the newly allocated bucket table is exposed as a so-called future table as soon as the resize process starts. Lookups, deletions, and insertions will briefly use both tables. The future table becomes the main table after an RCU grace period and after the initial linking of the old to the new table has been performed. Optimization of the chains to make use of the new number of buckets follows only once the new table is in use.

The side effect of this is that during that RCU grace period, a bucket traversal using any rht_for_each() variant on the main table will not see any insertions performed during the RCU grace period, which would at that point land in the future table. A lookup will see them, as it searches both tables if needed.

Having multiple insertions and removals occur in parallel requires nelems to become an atomic counter.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
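As a rough illustration of the locking scheme described above (a sketch only, not part of the header hunk below; the helper name bucket_lock() is illustrative): since locks[] is allocated with a power-of-two number of entries, the lock guarding a bucket can be picked by masking the bucket hash with locks_mask.

/*
 * Sketch: select the spinlock that guards a given bucket. Several
 * buckets may map to the same lock, but buckets that map to different
 * locks can be mutated in parallel.
 */
static inline spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}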
Diffstat (limited to 'include')
-rw-r--r--  include/linux/rhashtable.h  37
1 file changed, 26 insertions, 11 deletions
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index f624d4b5045f..a1688f0a6193 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -19,6 +19,7 @@
#define _LINUX_RHASHTABLE_H
#include <linux/rculist.h>
+#include <linux/workqueue.h>
struct rhash_head {
struct rhash_head __rcu *next;
@@ -26,8 +27,17 @@ struct rhash_head {
#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL)
+/**
+ * struct bucket_table - Table of hash buckets
+ * @size: Number of hash buckets
+ * @locks_mask: Mask to apply before accessing locks[]
+ * @locks: Array of spinlocks protecting individual buckets
+ * @buckets: size * hash buckets
+ */
struct bucket_table {
size_t size;
+ unsigned int locks_mask;
+ spinlock_t *locks;
struct rhash_head __rcu *buckets[];
};
@@ -45,11 +55,11 @@ struct rhashtable;
* @hash_rnd: Seed to use while hashing
* @max_shift: Maximum number of shifts while expanding
* @min_shift: Minimum number of shifts while shrinking
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
* @hashfn: Function to hash key
* @obj_hashfn: Function to hash object
* @grow_decision: If defined, may return true if table should expand
* @shrink_decision: If defined, may return true if table should shrink
- * @mutex_is_held: Must return true if protecting mutex is held
*/
struct rhashtable_params {
size_t nelem_hint;
@@ -59,37 +69,42 @@ struct rhashtable_params {
u32 hash_rnd;
size_t max_shift;
size_t min_shift;
+ size_t locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
bool (*grow_decision)(const struct rhashtable *ht,
size_t new_size);
bool (*shrink_decision)(const struct rhashtable *ht,
size_t new_size);
-#ifdef CONFIG_PROVE_LOCKING
- int (*mutex_is_held)(void *parent);
- void *parent;
-#endif
};
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
+ * @future_tbl: Table under construction during expansion/shrinking
* @nelems: Number of elements in table
* @shift: Current size (1 << shift)
* @p: Configuration parameters
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @being_destroyed: True if table is set up for destruction
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- size_t nelems;
+ struct bucket_table __rcu *future_tbl;
+ atomic_t nelems;
size_t shift;
struct rhashtable_params p;
+ struct delayed_work run_work;
+ struct mutex mutex;
+ bool being_destroyed;
};
#ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht);
+int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
-static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
return 1;
}
@@ -112,11 +127,11 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key,
+void *rhashtable_lookup(struct rhashtable *ht, const void *key);
+void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
bool (*compare)(void *, void *), void *arg);
-void rhashtable_destroy(const struct rhashtable *ht);
+void rhashtable_destroy(struct rhashtable *ht);
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
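For reference, a hedged sketch of how a caller might tune the new locks_mul knob when setting up a table. The struct test_obj layout, the init function, and the chosen values are illustrative; rhashtable_init(), jhash(), rht_grow_above_75(), and rht_shrink_below_30() are existing API declared by this header and <linux/jhash.h>.

#include <linux/jhash.h>
#include <linux/rhashtable.h>

struct test_obj {
	int			key;
	struct rhash_head	node;
};

static struct rhashtable test_ht;

static int test_ht_init(void)
{
	struct rhashtable_params params = {
		.nelem_hint	= 1024,
		.head_offset	= offsetof(struct test_obj, node),
		.key_offset	= offsetof(struct test_obj, key),
		.key_len	= sizeof(int),
		.hashfn		= jhash,
		/* bucket locks allocated per CPU; 0 keeps the default of 128 */
		.locks_mul	= 64,
		.grow_decision	= rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};

	/* With this patch, expansion/shrinking triggered by the grow/shrink
	 * decisions runs from the deferred worker, so inserts and removals
	 * may be issued from atomic context. */
	return rhashtable_init(&test_ht, &params);
}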