author    Matthew Wilcox (Oracle) <willy@infradead.org>	2022-01-10 23:15:27 +0000
committer Kees Cook <keescook@chromium.org>	2022-04-13 12:15:50 -0700
commit    4e140f59d285c1ca1e5c81b4c13e27366865bd09
tree      0e16718847cb02bacbe823ac5ba388619e46359f
parent    Merge tag 'hardening-v5.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
mm/usercopy: Check kmap addresses properly
If you are copying to an address in the kmap region, you may not copy
across a page boundary, no matter what the size of the underlying
allocation. You can't kmap() a slab page because slab pages always come
from low memory.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220110231530.665970-2-willy@infradead.org
 arch/x86/include/asm/highmem.h   |  1 +
 include/linux/highmem-internal.h | 10 ++++++++++
 mm/usercopy.c                    | 16 ++++++++++------
 3 files changed, 21 insertions(+), 6 deletions(-)
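The check the patch adds is pure address arithmetic: ORing a pointer with PAGE_SIZE - 1 yields the address of the last byte of the page containing it, and the last byte the copy touches (ptr + n - 1) must not pass that. A standalone sketch of the arithmetic (plain C; the PAGE_SIZE value and the sample address are illustrative assumptions, not kernel values):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages, as on x86 */

/* Returns 1 if a copy of n bytes starting at ptr stays within one page. */
static int copy_stays_in_page(unsigned long ptr, unsigned long n)
{
	/* Address of the last byte of the page containing ptr. */
	unsigned long page_end = ptr | (PAGE_SIZE - 1);

	/* The last byte touched by the copy must not pass page_end. */
	return ptr + n - 1 <= page_end;
}

int main(void)
{
	unsigned long p = 0xfe400ff0UL;	/* hypothetical kmap address */

	printf("%d\n", copy_stays_in_page(p, 16));	/* 1: ends exactly at the boundary */
	printf("%d\n", copy_stays_in_page(p, 17));	/* 0: crosses into the next page */
	return 0;
}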
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 032e020853aa..731ee7cc40a5 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -26,6 +26,7 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/fixmap.h>
+#include <asm/pgtable_areas.h>
 
 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index a77be5630209..337bd9f32921 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -149,6 +149,11 @@ static inline void totalhigh_pages_add(long count)
 	atomic_long_add(count, &_totalhigh_pages);
 }
 
+static inline bool is_kmap_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+	return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
+}
 #else /* CONFIG_HIGHMEM */
 
 static inline struct page *kmap_to_page(void *addr)
@@ -234,6 +239,11 @@ static inline void __kunmap_atomic(void *addr)
 static inline unsigned int nr_free_highpages(void) { return 0; }
 static inline unsigned long totalhigh_pages(void) { return 0UL; }
 
+static inline bool is_kmap_addr(const void *x)
+{
+	return false;
+}
+
 #endif /* CONFIG_HIGHMEM */
 
 /*
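On x86, PKMAP_ADDR(nr) expands to PKMAP_BASE + ((nr) << PAGE_SHIFT), so the helper above is a plain range check against the persistent-kmap window; the <asm/pgtable_areas.h> include added to highmem.h supplies the PKMAP_BASE layout definition. A self-contained sketch of the same check with the macros spelled out (the PKMAP_BASE and LAST_PKMAP values below are placeholders, not the real x86 layout):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4 KiB pages */
#define PKMAP_BASE	0xff800000UL		/* placeholder, not the real x86 value */
#define LAST_PKMAP	1024			/* placeholder slot count */
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

/* Same shape as the new helper: a range check on the kmap window. */
static int is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
}

int main(void)
{
	printf("%d\n", is_kmap_addr((void *)PKMAP_ADDR(0)));		/* 1: first slot */
	printf("%d\n", is_kmap_addr((void *)PKMAP_ADDR(LAST_PKMAP)));	/* 0: one past the end */
	return 0;
}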
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 2c235d5c2364..ff13e7708faa 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -229,12 +229,16 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (!virt_addr_valid(ptr))
 		return;
 
-	/*
-	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
-	 * highmem page or fallback to virt_to_page(). The following
-	 * is effectively a highmem-aware virt_to_slab().
-	 */
-	folio = page_folio(kmap_to_page((void *)ptr));
+	if (is_kmap_addr(ptr)) {
+		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
+
+		if ((unsigned long)ptr + n - 1 > page_end)
+			usercopy_abort("kmap", NULL, to_user,
+				       offset_in_page(ptr), n);
+		return;
+	}
+
+	folio = virt_to_folio(ptr);
 
 	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
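Taken together, the new control flow in check_heap_object() is: pointers in the kmap window get only the page-boundary check and return early, while everything else goes through virt_to_folio() into the existing slab checks, with kmap_to_page() gone from the path entirely. A condensed, standalone sketch of that flow (the helpers here are simplified stand-ins: the kernel's usercopy_abort() takes name, detail, to_user, offset and length, and does not return):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

/* Stub: pretend every pointer we test lies in the kmap window. */
static int is_kmap_addr(const void *x)
{
	(void)x;
	return 1;
}

/* Stub with a simplified signature; the real function never returns. */
static void usercopy_abort(const char *name, unsigned long offset,
			   unsigned long n)
{
	fprintf(stderr, "usercopy: %s, offset %lu, size %lu\n",
		name, offset, n);
	exit(1);
}

/* Condensed version of the new kmap path in check_heap_object(). */
static void check_heap_object(const void *ptr, unsigned long n)
{
	if (is_kmap_addr(ptr)) {
		/* Last byte of the virtual page containing ptr. */
		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);

		if ((unsigned long)ptr + n - 1 > page_end)
			usercopy_abort("kmap",
				       (unsigned long)ptr % PAGE_SIZE, n);
		return;
	}
	/* Non-kmap pointers fall through to virt_to_folio() and the
	 * slab checks in the real function. */
}

int main(void)
{
	check_heap_object((void *)0x1000ff0UL, 16);	/* fits in the page: OK */
	check_heap_object((void *)0x1000ff0UL, 17);	/* crosses the boundary: aborts */
	return 0;
}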