author	Jeff Vander Stoep <jeffv@google.com>	2019-11-22 10:33:06 +0100
committer	Paul Moore <paul@paul-moore.com>	2019-12-09 16:14:51 -0500
commit	66f8e2f03c02e812002f8e9e465681cc62edda5b (patch)
tree	44851eca8f9b556373e095245eb60491eab48d73 /security/selinux/ss/sidtab.c
parent	Linux 5.5-rc1 (diff)
selinux: sidtab reverse lookup hash table
This replaces the reverse table lookup and reverse cache with a hashtable, which improves cache-miss reverse-lookup times from O(n) to O(1)* and matches the performance of a reverse cache hit. This reduces the time needed to add a new sidtab entry from ~500us to 5us on a Pixel 3 when there are ~10,000 sidtab entries.

The implementation uses the kernel's generic hashtable API. It uses the context's string representation as the hash source, and the kernel's generic string hashing function full_name_hash() to reduce the string to a 32-bit value.

This change also maintains the improvement introduced in commit ee1a84fdfeed ("selinux: overhaul sidtab to fix bug and improve performance"), which removed the need to keep the current sidtab locked during policy reload. It does, however, introduce periodic locking of the target sidtab while converting the hashtable. Sidtab entries are never modified or removed, so the context struct stored in the sid_to_context tree can also be used for the context_to_sid hashtable, reducing memory usage.

This bug was reported by:

- On the selinux bug tracker:
  BUG: kernel softlockup due to too many SIDs/contexts #37
  https://github.com/SELinuxProject/selinux-kernel/issues/37

- Jovana Knezevic on Android's bugtracker (Bug: 140252993):
  "During multi-user performance testing, we create and remove users many times. selinux_android_restorecon_pkgdir goes from 1ms to over 20ms after about 200 user creations and removals. Accumulated over ~280 packages, that adds a significant time to user creation, making perf benchmarks unreliable."

* Hashtable lookup is only O(1) when n < the number of buckets.

Signed-off-by: Jeff Vander Stoep <jeffv@google.com>
Reported-by: Stephen Smalley <sds@tycho.nsa.gov>
Reported-by: Jovana Knezevic <jovanak@google.com>
Reviewed-by: Stephen Smalley <sds@tycho.nsa.gov>
Tested-by: Stephen Smalley <sds@tycho.nsa.gov>
[PM: subj tweak, removed changelog from patch description]
Signed-off-by: Paul Moore <paul@paul-moore.com>
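For illustration only (not part of this patch): a minimal sketch of the reverse-lookup pattern described above, combining the kernel's generic hashtable API with full_name_hash(). The example_* names and the 9-bit table size are hypothetical; the patch itself keys real sidtab entries by the precomputed context->hash, as shown in the diff below.

/*
 * Illustrative sketch only -- the example_* identifiers and table size
 * are hypothetical and are not added by this patch.
 */
#include <linux/hashtable.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/stringhash.h>
#include <linux/types.h>

#define EXAMPLE_HASH_BITS 9	/* 512 buckets */

static DEFINE_HASHTABLE(example_context_to_sid, EXAMPLE_HASH_BITS);

struct example_entry {
	u32 sid;
	u32 hash;		/* cached 32-bit hash of the context string */
	const char *ctx_str;	/* the context's string representation */
	struct hlist_node list;
};

/* Hash the context string once, then insert the entry under that key. */
static void example_insert(struct example_entry *e)
{
	e->hash = full_name_hash(NULL, e->ctx_str, strlen(e->ctx_str));
	hash_add_rcu(example_context_to_sid, &e->list, e->hash);
}

/* Reverse lookup: walk only the colliding bucket, compare, return the SID. */
static u32 example_lookup(const char *ctx_str)
{
	u32 key = full_name_hash(NULL, ctx_str, strlen(ctx_str));
	struct example_entry *e;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(example_context_to_sid, e, list, key) {
		if (e->hash == key && !strcmp(e->ctx_str, ctx_str)) {
			sid = e->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}

The lookup stays O(1) on average as long as the number of entries stays below the number of buckets, which is the caveat flagged in the footnote above.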
Diffstat (limited to 'security/selinux/ss/sidtab.c')
-rw-r--r--	security/selinux/ss/sidtab.c	263
1 file changed, 127 insertions, 136 deletions
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index 7d49994e8d5f..d9d8599e8e63 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -17,26 +17,43 @@
#include "security.h"
#include "sidtab.h"
+#define index_to_sid(index) (index + SECINITSID_NUM + 1)
+#define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))
+
int sidtab_init(struct sidtab *s)
{
u32 i;
memset(s->roots, 0, sizeof(s->roots));
- /* max count is SIDTAB_MAX so valid index is always < SIDTAB_MAX */
- for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
- s->rcache[i] = SIDTAB_MAX;
-
for (i = 0; i < SECINITSID_NUM; i++)
s->isids[i].set = 0;
s->count = 0;
s->convert = NULL;
+ hash_init(s->context_to_sid);
spin_lock_init(&s->lock);
return 0;
}
+static u32 context_to_sid(struct sidtab *s, struct context *context)
+{
+ struct sidtab_entry_leaf *entry;
+ u32 sid = 0;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(s->context_to_sid, entry, list,
+ context->hash) {
+ if (context_cmp(&entry->context, context)) {
+ sid = entry->sid;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return sid;
+}
+
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
struct sidtab_isid_entry *entry;
@@ -47,14 +64,60 @@ int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
entry = &s->isids[sid - 1];
- rc = context_cpy(&entry->context, context);
+ rc = context_cpy(&entry->leaf.context, context);
if (rc)
return rc;
entry->set = 1;
+
+ /*
+ * Multiple initial sids may map to the same context. Check that this
+ * context is not already represented in the context_to_sid hashtable
+ * to avoid duplicate entries and long linked lists upon hash
+ * collision.
+ */
+ if (!context_to_sid(s, context)) {
+ entry->leaf.sid = sid;
+ hash_add(s->context_to_sid, &entry->leaf.list, context->hash);
+ }
+
return 0;
}
+int sidtab_hash_stats(struct sidtab *sidtab, char *page)
+{
+ int i;
+ int chain_len = 0;
+ int slots_used = 0;
+ int entries = 0;
+ int max_chain_len = 0;
+ int cur_bucket = 0;
+ struct sidtab_entry_leaf *entry;
+
+ rcu_read_lock();
+ hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
+ entries++;
+ if (i == cur_bucket) {
+ chain_len++;
+ if (chain_len == 1)
+ slots_used++;
+ } else {
+ cur_bucket = i;
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+ chain_len = 0;
+ }
+ }
+ rcu_read_unlock();
+
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+
+ return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
+ "longest chain: %d\n", entries,
+ slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
+}
+
static u32 sidtab_level_from_count(u32 count)
{
u32 capacity = SIDTAB_LEAF_ENTRIES;
@@ -88,7 +151,8 @@ static int sidtab_alloc_roots(struct sidtab *s, u32 level)
return 0;
}
-static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
+static struct sidtab_entry_leaf *sidtab_do_lookup(struct sidtab *s, u32 index,
+ int alloc)
{
union sidtab_entry_inner *entry;
u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
@@ -125,7 +189,7 @@ static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
if (!entry->ptr_leaf)
return NULL;
}
- return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES].context;
+ return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}
static struct context *sidtab_lookup(struct sidtab *s, u32 index)
@@ -136,12 +200,12 @@ static struct context *sidtab_lookup(struct sidtab *s, u32 index)
if (index >= count)
return NULL;
- return sidtab_do_lookup(s, index, 0);
+ return &sidtab_do_lookup(s, index, 0)->context;
}
static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
- return s->isids[sid - 1].set ? &s->isids[sid - 1].context : NULL;
+ return s->isids[sid - 1].set ? &s->isids[sid - 1].leaf.context : NULL;
}
static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
@@ -150,7 +214,7 @@ static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
if (sid != 0) {
if (sid > SECINITSID_NUM)
- context = sidtab_lookup(s, sid - (SECINITSID_NUM + 1));
+ context = sidtab_lookup(s, sid_to_index(sid));
else
context = sidtab_lookup_initial(s, sid);
if (context && (!context->len || force))
@@ -170,117 +234,30 @@ struct context *sidtab_search_force(struct sidtab *s, u32 sid)
return sidtab_search_core(s, sid, 1);
}
-static int sidtab_find_context(union sidtab_entry_inner entry,
- u32 *pos, u32 count, u32 level,
- struct context *context, u32 *index)
-{
- int rc;
- u32 i;
-
- if (level != 0) {
- struct sidtab_node_inner *node = entry.ptr_inner;
-
- i = 0;
- while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
- rc = sidtab_find_context(node->entries[i],
- pos, count, level - 1,
- context, index);
- if (rc == 0)
- return 0;
- i++;
- }
- } else {
- struct sidtab_node_leaf *node = entry.ptr_leaf;
-
- i = 0;
- while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
- if (context_cmp(&node->entries[i].context, context)) {
- *index = *pos;
- return 0;
- }
- (*pos)++;
- i++;
- }
- }
- return -ENOENT;
-}
-
-static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
-{
- while (pos > 0) {
- WRITE_ONCE(s->rcache[pos], READ_ONCE(s->rcache[pos - 1]));
- --pos;
- }
- WRITE_ONCE(s->rcache[0], index);
-}
-
-static void sidtab_rcache_push(struct sidtab *s, u32 index)
-{
- sidtab_rcache_update(s, index, SIDTAB_RCACHE_SIZE - 1);
-}
-
-static int sidtab_rcache_search(struct sidtab *s, struct context *context,
- u32 *index)
-{
- u32 i;
-
- for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
- u32 v = READ_ONCE(s->rcache[i]);
-
- if (v >= SIDTAB_MAX)
- continue;
-
- if (context_cmp(sidtab_do_lookup(s, v, 0), context)) {
- sidtab_rcache_update(s, v, i);
- *index = v;
- return 0;
- }
- }
- return -ENOENT;
-}
-
-static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
- u32 *index)
+int sidtab_context_to_sid(struct sidtab *s, struct context *context,
+ u32 *sid)
{
unsigned long flags;
- u32 count, count_locked, level, pos;
+ u32 count;
struct sidtab_convert_params *convert;
- struct context *dst, *dst_convert;
+ struct sidtab_entry_leaf *dst, *dst_convert;
int rc;
- rc = sidtab_rcache_search(s, context, index);
- if (rc == 0)
- return 0;
-
- /* read entries only after reading count */
- count = smp_load_acquire(&s->count);
- level = sidtab_level_from_count(count);
-
- pos = 0;
- rc = sidtab_find_context(s->roots[level], &pos, count, level,
- context, index);
- if (rc == 0) {
- sidtab_rcache_push(s, *index);
+ *sid = context_to_sid(s, context);
+ if (*sid)
return 0;
- }
/* lock-free search failed: lock, re-search, and insert if not found */
spin_lock_irqsave(&s->lock, flags);
+ rc = 0;
+ *sid = context_to_sid(s, context);
+ if (*sid)
+ goto out_unlock;
+
+ /* read entries only after reading count */
+ count = smp_load_acquire(&s->count);
convert = s->convert;
- count_locked = s->count;
- level = sidtab_level_from_count(count_locked);
-
- /* if count has changed before we acquired the lock, then catch up */
- while (count < count_locked) {
- if (context_cmp(sidtab_do_lookup(s, count, 0), context)) {
- sidtab_rcache_push(s, count);
- *index = count;
- rc = 0;
- goto out_unlock;
- }
- ++count;
- }
/* bail out if we already reached max entries */
rc = -EOVERFLOW;
@@ -293,7 +270,9 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
if (!dst)
goto out_unlock;
- rc = context_cpy(dst, context);
+ dst->sid = index_to_sid(count);
+
+ rc = context_cpy(&dst->context, context);
if (rc)
goto out_unlock;
@@ -305,29 +284,32 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
rc = -ENOMEM;
dst_convert = sidtab_do_lookup(convert->target, count, 1);
if (!dst_convert) {
- context_destroy(dst);
+ context_destroy(&dst->context);
goto out_unlock;
}
- rc = convert->func(context, dst_convert, convert->args);
+ rc = convert->func(context, &dst_convert->context,
+ convert->args);
if (rc) {
- context_destroy(dst);
+ context_destroy(&dst->context);
goto out_unlock;
}
-
- /* at this point we know the insert won't fail */
+ dst_convert->sid = index_to_sid(count);
convert->target->count = count + 1;
+
+ hash_add_rcu(convert->target->context_to_sid,
+ &dst_convert->list, dst_convert->context.hash);
}
if (context->len)
pr_info("SELinux: Context %s is not valid (left unmapped).\n",
context->str);
- sidtab_rcache_push(s, count);
- *index = count;
+ *sid = index_to_sid(count);
- /* write entries before writing new count */
+ /* write entries before updating count */
smp_store_release(&s->count, count + 1);
+ hash_add_rcu(s->context_to_sid, &dst->list, dst->context.hash);
rc = 0;
out_unlock:
@@ -335,25 +317,19 @@ out_unlock:
return rc;
}
-int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
+static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
- int rc;
+ struct sidtab_entry_leaf *entry;
u32 i;
- for (i = 0; i < SECINITSID_NUM; i++) {
- struct sidtab_isid_entry *entry = &s->isids[i];
+ for (i = 0; i < count; i++) {
+ entry = sidtab_do_lookup(s, i, 0);
+ entry->sid = index_to_sid(i);
- if (entry->set && context_cmp(context, &entry->context)) {
- *sid = i + 1;
- return 0;
- }
- }
+ hash_add_rcu(s->context_to_sid, &entry->list,
+ entry->context.hash);
- rc = sidtab_reverse_lookup(s, context, sid);
- if (rc)
- return rc;
- *sid += SECINITSID_NUM + 1;
- return 0;
+ }
}
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
@@ -400,6 +376,7 @@ static int sidtab_convert_tree(union sidtab_entry_inner *edst,
}
cond_resched();
}
+
return 0;
}
@@ -435,7 +412,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
/* enable live convert of new entries */
s->convert = params;
- /* we can safely do the rest of the conversion outside the lock */
+ /* we can safely convert the tree outside the lock */
spin_unlock_irqrestore(&s->lock, flags);
pr_info("SELinux: Converting %u SID table entries...\n", count);
@@ -449,8 +426,17 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
spin_lock_irqsave(&s->lock, flags);
s->convert = NULL;
spin_unlock_irqrestore(&s->lock, flags);
+ return rc;
}
- return rc;
+ /*
+ * The hashtable can also be modified in sidtab_context_to_sid()
+ * so we must re-acquire the lock here.
+ */
+ spin_lock_irqsave(&s->lock, flags);
+ sidtab_convert_hashtable(params->target, count);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return 0;
}
static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
@@ -484,11 +470,16 @@ void sidtab_destroy(struct sidtab *s)
for (i = 0; i < SECINITSID_NUM; i++)
if (s->isids[i].set)
- context_destroy(&s->isids[i].context);
+ context_destroy(&s->isids[i].leaf.context);
level = SIDTAB_MAX_LEVEL;
while (level && !s->roots[level].ptr_inner)
--level;
sidtab_destroy_tree(s->roots[level], level);
+ /*
+ * The context_to_sid hashtable's objects are all shared
+ * with the isids array and context tree, and so don't need
+ * to be cleaned up here.
+ */
}