author     Linus Torvalds <torvalds@linux-foundation.org>   2022-05-24 12:27:09 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-05-24 12:27:09 -0700
commit     0bf13a84362e750a90008af259b098d7c0e0755b (patch)
tree       f7a315eb2c10ede9d92292a791fcff97486b924c /security
parent     Merge tag 'fsverity-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt (diff)
parent     loadpin: stop using bdevname (diff)
Merge tag 'kernel-hardening-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull kernel hardening updates from Kees Cook:

 - usercopy hardening expanded to check other allocation types (Matthew Wilcox, Yuanzheng Song)

 - arm64 stackleak behavioral improvements (Mark Rutland)

 - arm64 CFI code gen improvement (Sami Tolvanen)

 - LoadPin LSM block dev API adjustment (Christoph Hellwig)

 - Clang randstruct support (Bill Wendling, Kees Cook)

* tag 'kernel-hardening-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (34 commits)
  loadpin: stop using bdevname
  mm: usercopy: move the virt_addr_valid() below the is_vmalloc_addr()
  gcc-plugins: randstruct: Remove cast exception handling
  af_unix: Silence randstruct GCC plugin warning
  niu: Silence randstruct warnings
  big_keys: Use struct for internal payload
  gcc-plugins: Change all version strings match kernel
  randomize_kstack: Improve docs on requirements/rationale
  lkdtm/stackleak: fix CONFIG_GCC_PLUGIN_STACKLEAK=n
  arm64: entry: use stackleak_erase_on_task_stack()
  stackleak: add on/off stack variants
  lkdtm/stackleak: check stack boundaries
  lkdtm/stackleak: prevent unexpected stack usage
  lkdtm/stackleak: rework boundary management
  lkdtm/stackleak: avoid spurious failure
  stackleak: rework poison scanning
  stackleak: rework stack high bound handling
  stackleak: clarify variable names
  stackleak: rework stack low bound handling
  stackleak: remove redundant check
  ...
Diffstat (limited to 'security')
-rw-r--r--   security/Kconfig             13
-rw-r--r--   security/Kconfig.hardening   73
-rw-r--r--   security/keys/big_key.c      73
-rw-r--r--   security/loadpin/loadpin.c    5
-rw-r--r--   security/security.c           9
5 files changed, 115 insertions, 58 deletions
diff --git a/security/Kconfig b/security/Kconfig
index 9b2c4925585a..f29e4c656983 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -160,20 +160,9 @@ config HARDENED_USERCOPY
copy_from_user() functions) by rejecting memory ranges that
are larger than the specified heap object, span multiple
separately allocated pages, are not on the process stack,
- or are part of the kernel text. This kills entire classes
+ or are part of the kernel text. This prevents entire classes
of heap overflow exploits and similar kernel memory exposures.
-config HARDENED_USERCOPY_PAGESPAN
- bool "Refuse to copy allocations that span multiple pages"
- depends on HARDENED_USERCOPY
- depends on BROKEN
- help
- When a multi-page allocation is done without __GFP_COMP,
- hardened usercopy will reject attempts to copy it. There are,
- however, several cases of this in the kernel that have not all
- been removed. This config is intended to be used only while
- trying to find such users.
-
config FORTIFY_SOURCE
bool "Harden common str/mem functions against buffer overflows"
depends on ARCH_HAS_FORTIFY_SOURCE
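
For a sense of what the HARDENED_USERCOPY help text above is describing, here is a minimal sketch (hypothetical driver code, not part of this series) of the bug class it stops: a copy_to_user() whose length exceeds the heap object backing the kernel buffer, which the hardened bounds check rejects instead of exposing adjacent slab memory.

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct small_obj {
	u32 a;
	u32 b;
};

static long leaky_read(void __user *uptr)
{
	struct small_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	long ret = 0;

	if (!obj)
		return -ENOMEM;
	/* 64 > sizeof(*obj): with CONFIG_HARDENED_USERCOPY=y the
	 * usercopy check refuses this copy rather than leaking the
	 * neighbouring slab objects. */
	if (copy_to_user(uptr, obj, 64))
		ret = -EFAULT;
	kfree(obj);
	return ret;
}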
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index ded4d7c0d132..bd2aabb2c60f 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -266,4 +266,77 @@ config ZERO_CALL_USED_REGS
endmenu
+config CC_HAS_RANDSTRUCT
+ def_bool $(cc-option,-frandomize-layout-seed-file=/dev/null)
+
+choice
+ prompt "Randomize layout of sensitive kernel structures"
+ default RANDSTRUCT_FULL if COMPILE_TEST && (GCC_PLUGINS || CC_HAS_RANDSTRUCT)
+ default RANDSTRUCT_NONE
+ help
+ If you enable this, the layouts of structures that are entirely
+ function pointers (and have not been manually annotated with
+ __no_randomize_layout), or structures that have been explicitly
+ marked with __randomize_layout, will be randomized at compile-time.
+ This can introduce the requirement of an additional information
+ exposure vulnerability for exploits targeting these structure
+ types.
+
+ Enabling this feature will introduce some performance impact,
+ slightly increase memory usage, and prevent the use of forensic
+ tools like Volatility against the system (unless the kernel
+ source tree isn't cleaned after kernel installation).
+
+ The seed used for compilation is in scripts/basic/randomize.seed.
+ It remains after a "make clean" to allow for external modules to
+ be compiled with the existing seed and will be removed by a
+ "make mrproper" or "make distclean". This file should not be made
+ public, or the structure layout can be determined.
+
+ config RANDSTRUCT_NONE
+ bool "Disable structure layout randomization"
+ help
+ Build normally: no structure layout randomization.
+
+ config RANDSTRUCT_FULL
+ bool "Fully randomize structure layout"
+ depends on CC_HAS_RANDSTRUCT || GCC_PLUGINS
+ select MODVERSIONS if MODULES
+ help
+ Fully randomize the member layout of sensitive
+ structures as much as possible, which may have both a
+ memory size and performance impact.
+
+ One difference between the Clang and GCC plugin
+ implementations is the handling of bitfields. The GCC
+ plugin treats them as fully separate variables,
+ introducing sometimes significant padding. Clang tries
+ to keep adjacent bitfields together, but with their bit
+ ordering randomized.
+
+ config RANDSTRUCT_PERFORMANCE
+ bool "Limit randomization of structure layout to cache-lines"
+ depends on GCC_PLUGINS
+ select MODVERSIONS if MODULES
+ help
+ Randomization of sensitive kernel structures will make a
+ best effort at restricting randomization to cacheline-sized
+ groups of members. It will further not randomize bitfields
+ in structures. This reduces the performance hit of RANDSTRUCT
+ at the cost of weakened randomization.
+endchoice
+
+config RANDSTRUCT
+ def_bool !RANDSTRUCT_NONE
+
+config GCC_PLUGIN_RANDSTRUCT
+ def_bool GCC_PLUGINS && RANDSTRUCT
+ help
+ Use GCC plugin to randomize structure layout.
+
+ This plugin was ported from grsecurity/PaX. More
+ information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+
endmenu
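
The RANDSTRUCT help text above refers to two annotations from include/linux/compiler_types.h. A minimal sketch of how they are used (hypothetical structures, not taken from this series): structures made up entirely of function pointers are randomized by default and can opt out, while other sensitive structures opt in explicitly.

#include <linux/compiler_types.h>
#include <linux/types.h>

/* Opts in: member order is shuffled at compile time when RANDSTRUCT
 * is enabled. */
struct example_secrets {
	unsigned long flags;
	void *blob;
	size_t blob_len;
} __randomize_layout;

/* Entirely function pointers, so it would be randomized automatically;
 * __no_randomize_layout keeps the declared order. */
struct example_ops {
	int (*open)(void *priv);
	void (*close)(void *priv);
} __no_randomize_layout;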
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index d17e5f09eeb8..c3367622c683 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -20,12 +20,13 @@
/*
* Layout of key payload words.
*/
-enum {
- big_key_data,
- big_key_path,
- big_key_path_2nd_part,
- big_key_len,
+struct big_key_payload {
+ u8 *data;
+ struct path path;
+ size_t length;
};
+#define to_big_key_payload(payload) \
+ (struct big_key_payload *)((payload).data)
/*
* If the data is under this limit, there's no point creating a shm file to
@@ -55,7 +56,7 @@ struct key_type key_type_big_key = {
*/
int big_key_preparse(struct key_preparsed_payload *prep)
{
- struct path *path = (struct path *)&prep->payload.data[big_key_path];
+ struct big_key_payload *payload = to_big_key_payload(prep->payload);
struct file *file;
u8 *buf, *enckey;
ssize_t written;
@@ -63,13 +64,15 @@ int big_key_preparse(struct key_preparsed_payload *prep)
size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
int ret;
+ BUILD_BUG_ON(sizeof(*payload) != sizeof(prep->payload.data));
+
if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
return -EINVAL;
/* Set an arbitrary quota */
prep->quotalen = 16;
- prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;
+ payload->length = datalen;
if (datalen > BIG_KEY_FILE_THRESHOLD) {
/* Create a shmem file to store the data in. This will permit the data
@@ -117,9 +120,9 @@ int big_key_preparse(struct key_preparsed_payload *prep)
/* Pin the mount and dentry to the key so that we can open it again
* later
*/
- prep->payload.data[big_key_data] = enckey;
- *path = file->f_path;
- path_get(path);
+ payload->data = enckey;
+ payload->path = file->f_path;
+ path_get(&payload->path);
fput(file);
kvfree_sensitive(buf, enclen);
} else {
@@ -129,7 +132,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
if (!data)
return -ENOMEM;
- prep->payload.data[big_key_data] = data;
+ payload->data = data;
memcpy(data, prep->data, prep->datalen);
}
return 0;
@@ -148,12 +151,11 @@ error:
*/
void big_key_free_preparse(struct key_preparsed_payload *prep)
{
- if (prep->datalen > BIG_KEY_FILE_THRESHOLD) {
- struct path *path = (struct path *)&prep->payload.data[big_key_path];
+ struct big_key_payload *payload = to_big_key_payload(prep->payload);
- path_put(path);
- }
- kfree_sensitive(prep->payload.data[big_key_data]);
+ if (prep->datalen > BIG_KEY_FILE_THRESHOLD)
+ path_put(&payload->path);
+ kfree_sensitive(payload->data);
}
/*
@@ -162,13 +164,12 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
*/
void big_key_revoke(struct key *key)
{
- struct path *path = (struct path *)&key->payload.data[big_key_path];
+ struct big_key_payload *payload = to_big_key_payload(key->payload);
/* clear the quota */
key_payload_reserve(key, 0);
- if (key_is_positive(key) &&
- (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
- vfs_truncate(path, 0);
+ if (key_is_positive(key) && payload->length > BIG_KEY_FILE_THRESHOLD)
+ vfs_truncate(&payload->path, 0);
}
/*
@@ -176,17 +177,15 @@ void big_key_revoke(struct key *key)
*/
void big_key_destroy(struct key *key)
{
- size_t datalen = (size_t)key->payload.data[big_key_len];
-
- if (datalen > BIG_KEY_FILE_THRESHOLD) {
- struct path *path = (struct path *)&key->payload.data[big_key_path];
+ struct big_key_payload *payload = to_big_key_payload(key->payload);
- path_put(path);
- path->mnt = NULL;
- path->dentry = NULL;
+ if (payload->length > BIG_KEY_FILE_THRESHOLD) {
+ path_put(&payload->path);
+ payload->path.mnt = NULL;
+ payload->path.dentry = NULL;
}
- kfree_sensitive(key->payload.data[big_key_data]);
- key->payload.data[big_key_data] = NULL;
+ kfree_sensitive(payload->data);
+ payload->data = NULL;
}
/*
@@ -211,14 +210,14 @@ int big_key_update(struct key *key, struct key_preparsed_payload *prep)
*/
void big_key_describe(const struct key *key, struct seq_file *m)
{
- size_t datalen = (size_t)key->payload.data[big_key_len];
+ struct big_key_payload *payload = to_big_key_payload(key->payload);
seq_puts(m, key->description);
if (key_is_positive(key))
seq_printf(m, ": %zu [%s]",
- datalen,
- datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+ payload->length,
+ payload->length > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
}
/*
@@ -227,16 +226,16 @@ void big_key_describe(const struct key *key, struct seq_file *m)
*/
long big_key_read(const struct key *key, char *buffer, size_t buflen)
{
- size_t datalen = (size_t)key->payload.data[big_key_len];
+ struct big_key_payload *payload = to_big_key_payload(key->payload);
+ size_t datalen = payload->length;
long ret;
if (!buffer || buflen < datalen)
return datalen;
if (datalen > BIG_KEY_FILE_THRESHOLD) {
- struct path *path = (struct path *)&key->payload.data[big_key_path];
struct file *file;
- u8 *buf, *enckey = (u8 *)key->payload.data[big_key_data];
+ u8 *buf, *enckey = payload->data;
size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
loff_t pos = 0;
@@ -244,7 +243,7 @@ long big_key_read(const struct key *key, char *buffer, size_t buflen)
if (!buf)
return -ENOMEM;
- file = dentry_open(path, O_RDONLY, current_cred());
+ file = dentry_open(&payload->path, O_RDONLY, current_cred());
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto error;
@@ -274,7 +273,7 @@ error:
kvfree_sensitive(buf, enclen);
} else {
ret = datalen;
- memcpy(buffer, key->payload.data[big_key_data], datalen);
+ memcpy(buffer, payload->data, datalen);
}
return ret;
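
The big_key.c change above is an instance of a common idiom: lay a typed struct over the opaque payload words in struct key and let BUILD_BUG_ON() prove at build time that the typed view fits in the raw storage. A generic sketch of that idiom with stand-in names (not the exact big_key code):

#include <linux/bug.h>
#include <linux/types.h>

/* Stand-in for the opaque per-key payload words. */
union opaque_payload {
	void *data[4];
};

/* Typed view overlaid on the same storage. */
struct typed_payload {
	u8 *buf;
	void *cookie;
	size_t length;
};

#define to_typed_payload(payload) \
	((struct typed_payload *)((payload).data))

static void set_length(union opaque_payload *p, size_t len)
{
	struct typed_payload *tp = to_typed_payload(*p);

	/* Build fails if the typed view ever outgrows the raw words. */
	BUILD_BUG_ON(sizeof(*tp) > sizeof(p->data));
	tp->length = len;
}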
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index b12f7d986b1e..ad4e6756c038 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -78,11 +78,8 @@ static void check_pinning_enforcement(struct super_block *mnt_sb)
* device, allow sysctl to change modes for testing.
*/
if (mnt_sb->s_bdev) {
- char bdev[BDEVNAME_SIZE];
-
ro = bdev_read_only(mnt_sb->s_bdev);
- bdevname(mnt_sb->s_bdev, bdev);
- pr_info("%s (%u:%u): %s\n", bdev,
+ pr_info("%pg (%u:%u): %s\n", mnt_sb->s_bdev,
MAJOR(mnt_sb->s_bdev->bd_dev),
MINOR(mnt_sb->s_bdev->bd_dev),
ro ? "read-only" : "writable");
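
The loadpin hunk above replaces the removed bdevname() helper with the "%pg" printk format, which prints a block device's name straight from the struct block_device pointer. The same conversion in isolation (a hypothetical helper, mirroring the call in the diff):

#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/printk.h>

static void report_backing_dev(struct block_device *bdev, bool ro)
{
	/* Old style: bdevname(bdev, name) into a BDEVNAME_SIZE buffer,
	 * then print with "%s". "%pg" lets printk format the name. */
	pr_info("%pg (%u:%u): %s\n", bdev,
		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev),
		ro ? "read-only" : "writable");
}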
diff --git a/security/security.c b/security/security.c
index aaf6566deb9f..08420c6ff52d 100644
--- a/security/security.c
+++ b/security/security.c
@@ -367,13 +367,12 @@ static void __init ordered_lsm_init(void)
int __init early_security_init(void)
{
- int i;
- struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
struct lsm_info *lsm;
- for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
- i++)
- INIT_HLIST_HEAD(&list[i]);
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ INIT_HLIST_HEAD(&security_hook_heads.NAME);
+#include "linux/lsm_hook_defs.h"
+#undef LSM_HOOK
for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
if (!lsm->enabled)
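
The security.c hunk replaces the sizeof-based initialization loop with the LSM_HOOK X-macro: linux/lsm_hook_defs.h lists every hook once, and re-including it with a local LSM_HOOK definition expands to one INIT_HLIST_HEAD() per hook. A generic sketch of the X-macro idiom with assumed names (not the kernel header itself):

#include <linux/list.h>

/* One central list of names; every user re-expands it as needed. */
#define MY_HOOK_LIST(X) \
	X(file_open) \
	X(task_alloc)

/* Expansion 1: declare one list head per hook. */
struct my_hook_heads {
#define DECLARE_HEAD(name) struct hlist_head name;
	MY_HOOK_LIST(DECLARE_HEAD)
#undef DECLARE_HEAD
};

static struct my_hook_heads my_heads;

/* Expansion 2: initialize every head from the same single list. */
static void my_hooks_init(void)
{
#define INIT_HEAD(name) INIT_HLIST_HEAD(&my_heads.name);
	MY_HOOK_LIST(INIT_HEAD)
#undef INIT_HEAD
}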