author	Kees Cook <keescook@chromium.org>	2016-06-07 11:05:33 -0700
committer	Kees Cook <keescook@chromium.org>	2016-07-26 14:41:47 -0700
commit	f5509cc18daa7f82bcc553be70df2117c8eedc16 (patch)
tree	648605cc96e4ac412a9f5201468795574997d9bb /include/linux/slab.h
parent	mm: Implement stack frame object validation (diff)
mm: Hardened usercopy
This is the start of porting PAX_USERCOPY into the mainline kernel. This is
the first set of features, controlled by CONFIG_HARDENED_USERCOPY. The work
is based on code by PaX Team and Brad Spengler, and an earlier port from
Casey Schaufler. Additional non-slab page tests are from Rik van Riel.

This patch contains the logic for validating several conditions when
performing copy_to_user() and copy_from_user() on the kernel object being
copied to/from:

- address range doesn't wrap around
- address range isn't NULL or zero-allocated (with a non-zero copy size)
- if on the slab allocator:
  - copy size must be less than or equal to the object size (when the check
    is implemented in the allocator, which happens in subsequent patches)
- otherwise, object must not span page allocations (excepting Reserved and
  CMA ranges)
- if on the stack:
  - object must not extend before/after the current process stack
  - object must be contained by a valid stack frame (when there is
    arch/build support for identifying stack frames)
- object must not overlap with kernel text

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
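For orientation only, below is a minimal sketch of the first two checks in
the list above (range wrap-around, and NULL/zero-allocated pointers with a
non-zero copy size). The helper name is invented for illustration and is not
part of this patch; the real logic lives elsewhere in the series, and the
slab, stack, and kernel-text checks are elided here.

/* Hypothetical illustration only -- not code from this patch. */
static inline const char *sketch_check_bogus_range(const void *ptr,
						   unsigned long n)
{
	/* Reject ranges that wrap around the end of the address space. */
	if ((unsigned long)ptr + n < (unsigned long)ptr)
		return "<wrapped address>";

	/* Reject NULL/zero-allocated pointers when a copy is requested. */
	if (ZERO_OR_NULL_PTR(ptr) && n)
		return "<null>";

	/* NULL means "no problem found", matching __check_heap_object(). */
	return NULL;
}

The returned string, when non-NULL, names the failed check so the caller can
report it; this mirrors the convention of the __check_heap_object() hook
added by the diff below.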
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index aeb3e6d00a66..96a16a3fb7cb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,6 +155,18 @@ void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+ unsigned long n,
+ struct page *page)
+{
+ return NULL;
+}
+#endif
+
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
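The #ifdef stub in the hunk above returns NULL, the convention for "no
problem found"; an allocator that selects
CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR supplies the real check in later
patches of the series. As a hedged sketch only (not the actual follow-up
patch), a SLUB-style implementation could verify that the copy stays within
a single object of the slab page and return the cache name on failure:

/* Illustrative sketch of an allocator-side hook, e.g. in mm/slub.c. */
const char *__check_heap_object(const void *ptr, unsigned long n,
				struct page *page)
{
	struct kmem_cache *s = page->slab_cache;
	unsigned long offset;

	/* Reject pointers below the start of the slab page. */
	if (ptr < page_address(page))
		return s->name;

	/* Offset of ptr within its object, given the cache's object stride. */
	offset = (ptr - page_address(page)) % s->size;

	/* Allow the copy only if [ptr, ptr + n) stays inside one object. */
	if (offset <= s->object_size && n <= s->object_size - offset)
		return NULL;

	/* Name the offending cache so the caller can report it. */
	return s->name;
}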