Diffstat (limited to 'security/selinux/ss/sidtab.c')
-rw-r--r--	security/selinux/ss/sidtab.c	76
1 file changed, 56 insertions(+), 20 deletions(-)
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index f511ffccb131..db5cce385bf8 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -27,8 +27,8 @@ struct sidtab_str_cache {
char str[];
};
-#define index_to_sid(index) (index + SECINITSID_NUM + 1)
-#define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))
+#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
+#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))
int sidtab_init(struct sidtab *s)
{
@@ -39,6 +39,7 @@ int sidtab_init(struct sidtab *s)
for (i = 0; i < SECINITSID_NUM; i++)
s->isids[i].set = 0;
+ s->frozen = false;
s->count = 0;
s->convert = NULL;
hash_init(s->context_to_sid);
@@ -54,14 +55,15 @@ int sidtab_init(struct sidtab *s)
return 0;
}
-static u32 context_to_sid(struct sidtab *s, struct context *context)
+static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
struct sidtab_entry *entry;
u32 sid = 0;
rcu_read_lock();
- hash_for_each_possible_rcu(s->context_to_sid, entry, list,
- context->hash) {
+ hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
+ if (entry->hash != hash)
+ continue;
if (context_cmp(&entry->context, context)) {
sid = entry->sid;
break;
@@ -74,6 +76,7 @@ static u32 context_to_sid(struct sidtab *s, struct context *context)
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
struct sidtab_isid_entry *isid;
+ u32 hash;
int rc;
if (sid == 0 || sid > SECINITSID_NUM)
@@ -90,15 +93,18 @@ int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
#endif
isid->set = 1;
+ hash = context_compute_hash(context);
+
/*
* Multiple initial sids may map to the same context. Check that this
* context is not already represented in the context_to_sid hashtable
* to avoid duplicate entries and long linked lists upon hash
* collision.
*/
- if (!context_to_sid(s, context)) {
+ if (!context_to_sid(s, context, hash)) {
isid->entry.sid = sid;
- hash_add(s->context_to_sid, &isid->entry.list, context->hash);
+ isid->entry.hash = hash;
+ hash_add(s->context_to_sid, &isid->entry.list, hash);
}
return 0;
@@ -259,12 +265,12 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
u32 *sid)
{
unsigned long flags;
- u32 count;
+ u32 count, hash = context_compute_hash(context);
struct sidtab_convert_params *convert;
struct sidtab_entry *dst, *dst_convert;
int rc;
- *sid = context_to_sid(s, context);
+ *sid = context_to_sid(s, context, hash);
if (*sid)
return 0;
@@ -272,12 +278,20 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
spin_lock_irqsave(&s->lock, flags);
rc = 0;
- *sid = context_to_sid(s, context);
+ *sid = context_to_sid(s, context, hash);
if (*sid)
goto out_unlock;
- /* read entries only after reading count */
- count = smp_load_acquire(&s->count);
+ if (unlikely(s->frozen)) {
+ /*
+ * This sidtab is now frozen - tell the caller to abort and
+ * get the new one.
+ */
+ rc = -ESTALE;
+ goto out_unlock;
+ }
+
+ count = s->count;
convert = s->convert;
/* bail out if we already reached max entries */
@@ -292,6 +306,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
goto out_unlock;
dst->sid = index_to_sid(count);
+ dst->hash = hash;
rc = context_cpy(&dst->context, context);
if (rc)
@@ -310,16 +325,17 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
}
rc = convert->func(context, &dst_convert->context,
- convert->args);
+ convert->args, GFP_ATOMIC);
if (rc) {
context_destroy(&dst->context);
goto out_unlock;
}
dst_convert->sid = index_to_sid(count);
+ dst_convert->hash = context_compute_hash(&dst_convert->context);
convert->target->count = count + 1;
hash_add_rcu(convert->target->context_to_sid,
- &dst_convert->list, dst_convert->context.hash);
+ &dst_convert->list, dst_convert->hash);
}
if (context->len)
@@ -330,7 +346,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
/* write entries before updating count */
smp_store_release(&s->count, count + 1);
- hash_add_rcu(s->context_to_sid, &dst->list, dst->context.hash);
+ hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);
rc = 0;
out_unlock:
@@ -346,10 +362,9 @@ static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
for (i = 0; i < count; i++) {
entry = sidtab_do_lookup(s, i, 0);
entry->sid = index_to_sid(i);
+ entry->hash = context_compute_hash(&entry->context);
- hash_add_rcu(s->context_to_sid, &entry->list,
- entry->context.hash);
-
+ hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
}
}
@@ -389,7 +404,7 @@ static int sidtab_convert_tree(union sidtab_entry_inner *edst,
while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
rc = convert->func(&esrc->ptr_leaf->entries[i].context,
&edst->ptr_leaf->entries[i].context,
- convert->args);
+ convert->args, GFP_KERNEL);
if (rc)
return rc;
(*pos)++;
@@ -459,6 +474,27 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
return 0;
}
+void sidtab_cancel_convert(struct sidtab *s)
+{
+ unsigned long flags;
+
+ /* cancelling policy load - disable live convert of sidtab */
+ spin_lock_irqsave(&s->lock, flags);
+ s->convert = NULL;
+ spin_unlock_irqrestore(&s->lock, flags);
+}
+
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
+{
+ spin_lock_irqsave(&s->lock, *flags);
+ s->frozen = true;
+ s->convert = NULL;
+}
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
+{
+ spin_unlock_irqrestore(&s->lock, *flags);
+}
+
static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
context_destroy(&entry->context);
@@ -534,7 +570,7 @@ void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
goto out_unlock;
}
- cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
+ cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
if (!cache)
goto out_unlock;
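
The first hunk wraps the arguments of index_to_sid() and sid_to_index() in parentheses, the standard defensive fix for function-like macros: without them, an argument that is itself an expression can bind to the macro body with the wrong precedence. A minimal userspace sketch of the hazard, where the constant 9 merely stands in for SECINITSID_NUM + 1 (the actual value is irrelevant to the precedence problem):

#include <stdio.h>

/* Illustration only: why the sidtab macros gained parentheses around
 * their arguments.  9 stands in for SECINITSID_NUM + 1.
 */
#define OLD_SID_TO_INDEX(sid) (sid - 9)	  /* old, unparenthesized argument */
#define NEW_SID_TO_INDEX(sid) ((sid) - 9) /* new, parenthesized argument   */

int main(void)
{
	int use_first = 1, sid1 = 42, sid2 = 100;

	/* Old macro: expands to (use_first ? sid1 : sid2 - 9), so the
	 * subtraction silently attaches to sid2 only.
	 */
	printf("old: %d\n", OLD_SID_TO_INDEX(use_first ? sid1 : sid2)); /* 42 */

	/* New macro: the whole conditional is grouped before subtracting. */
	printf("new: %d\n", NEW_SID_TO_INDEX(use_first ? sid1 : sid2)); /* 33 */
	return 0;
}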
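The new frozen flag makes sidtab_context_to_sid() return -ESTALE instead of allocating into a table that a policy reload has already retired, and the in-line comment tells the caller to "abort and get the new one". A minimal sketch of that caller-side retry pattern, assuming a hypothetical current_sidtab() accessor that returns whichever table is live after the reload (the real call chain in the SELinux security server is more involved):

/* Hypothetical caller-side retry, sketched to show how -ESTALE from a
 * frozen sidtab is meant to be handled: drop the stale reference, pick
 * up the table installed by the policy reload, and retry the mapping.
 * current_sidtab() is a made-up accessor used only for this sketch.
 */
static int map_context_to_sid(struct context *ctx, u32 *sid)
{
	struct sidtab *s;
	int rc;

retry:
	rcu_read_lock();
	s = current_sidtab();		/* hypothetical: the live table */
	rc = sidtab_context_to_sid(s, ctx, sid);
	rcu_read_unlock();

	if (rc == -ESTALE)
		goto retry;		/* old table was frozen mid-reload */
	return rc;
}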
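The final hunk replaces an open-coded allocation size with struct_size() from include/linux/overflow.h, which computes the size of the structure plus its trailing flexible char str[] array and saturates to SIZE_MAX if the arithmetic would overflow, so kmalloc() fails cleanly instead of under-allocating. Side by side:

/* Open-coded form: plain addition, no overflow protection. */
cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);

/* struct_size() form: sizeof(*cache) + str_len * sizeof(*cache->str),
 * saturated to SIZE_MAX on overflow so the allocation fails rather
 * than returning a buffer that is too small.
 */
cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);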