author    Andrey Konovalov <andreyknvl@google.com>  2021-04-29 23:00:02 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-04-30 11:20:41 -0700
commit    1bb5eab30d68c1a3d9dbc822e1895e6c06dbe748 (patch)
tree      38af4345a7eaa93af2e2fead0b02e15cb6b691a3 /include/linux/kasan.h
parent    kasan: init memory in kasan_(un)poison for HW_TAGS (diff)
download  linux-dev-1bb5eab30d68c1a3d9dbc822e1895e6c06dbe748.tar.xz
          linux-dev-1bb5eab30d68c1a3d9dbc822e1895e6c06dbe748.zip
kasan, mm: integrate page_alloc init with HW_TAGS
This change uses the previously added memory initialization feature of
HW_TAGS KASAN routines for page_alloc memory when init_on_alloc/free is
enabled.

With this change, kernel_init_free_pages() is no longer called when both
HW_TAGS KASAN and init_on_alloc/free are enabled. Instead, memory is
initialized in the KASAN runtime.

To avoid discrepancies in which memory gets initialized that could be
caused by future changes, both the KASAN and kernel_init_free_pages()
hooks are put together and a warning comment is added.

This patch changes the order in which the memory initialization and page
poisoning hooks are called. This doesn't lead to any side effects, as
whenever page poisoning is enabled, memory initialization gets disabled.

Combining setting allocation tags with memory initialization improves
HW_TAGS KASAN performance when init_on_alloc/free is enabled.

[andreyknvl@google.com: fix for "integrate page_alloc init with HW_TAGS"]
Link: https://lkml.kernel.org/r/65b6028dea2e9a6e8e2cb779b5115c09457363fc.1617122211.git.andreyknvl@google.com

Link: https://lkml.kernel.org/r/e77f0d5b1b20658ef0b8288625c74c2b3690e725.1615296150.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Tested-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Sergei Trofimovich <slyfox@gentoo.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
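For context, here is a hedged sketch of what the alloc-path caller
plausibly looks like after this patch. The caller lives in
mm/page_alloc.c, which is outside this diffstat, so the surrounding
function and the kernel_init_free_pages() declaration are illustrative;
only kasan_alloc_pages() and kasan_has_integrated_init() come from the
header changed below.

#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/mm.h>	/* want_init_on_alloc() */

/* Normally static inside mm/page_alloc.c; declared here only so the
 * sketch is self-contained. */
void kernel_init_free_pages(struct page *page, int numpages);

static void post_alloc_hook_sketch(struct page *page, unsigned int order,
				   gfp_t gfp_flags)
{
	bool init = want_init_on_alloc(gfp_flags);

	/*
	 * With HW_TAGS KASAN, setting the allocation tags and zeroing
	 * the memory happen in a single pass inside the KASAN runtime,
	 * so the separate memset()-based initialization below is
	 * skipped.
	 */
	kasan_alloc_pages(page, order, init);
	if (init && !kasan_has_integrated_init())
		kernel_init_free_pages(page, 1 << order);
}

Keeping the KASAN hook and the fallback initialization adjacent, as the
commit message notes, is what guards against future changes making the
two paths disagree about which memory gets initialized.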
Diffstat (limited to 'include/linux/kasan.h')
-rw-r--r--  include/linux/kasan.h | 30 ++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 9f5faefd1744..30aa2bee8400 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -96,6 +96,11 @@ static __always_inline bool kasan_enabled(void)
return static_branch_likely(&kasan_flag_enabled);
}
+static inline bool kasan_has_integrated_init(void)
+{
+ return kasan_enabled();
+}
+
#else /* CONFIG_KASAN_HW_TAGS */
static inline bool kasan_enabled(void)
@@ -103,6 +108,11 @@ static inline bool kasan_enabled(void)
return true;
}
+static inline bool kasan_has_integrated_init(void)
+{
+ return false;
+}
+
#endif /* CONFIG_KASAN_HW_TAGS */
slab_flags_t __kasan_never_merge(void);
@@ -120,20 +130,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
__kasan_unpoison_range(addr, size);
}
-void __kasan_alloc_pages(struct page *page, unsigned int order);
+void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_alloc_pages(struct page *page,
- unsigned int order)
+ unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_alloc_pages(page, order);
+ __kasan_alloc_pages(page, order, init);
}
-void __kasan_free_pages(struct page *page, unsigned int order);
+void __kasan_free_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_free_pages(struct page *page,
- unsigned int order)
+ unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_free_pages(page, order);
+ __kasan_free_pages(page, order, init);
}
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -277,13 +287,17 @@ static inline bool kasan_enabled(void)
{
return false;
}
+static inline bool kasan_has_integrated_init(void)
+{
+ return false;
+}
static inline slab_flags_t kasan_never_merge(void)
{
return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
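For completeness, the free path plausibly mirrors the alloc path. Again
a hedged sketch: free_pages_prepare() and kernel_init_free_pages() live
in mm/page_alloc.c, outside this diff, so the surrounding function is
illustrative and only kasan_free_pages() and kasan_has_integrated_init()
come from this header.

#include <linux/kasan.h>
#include <linux/mm.h>	/* want_init_on_free() */

/* Normally static inside mm/page_alloc.c; declared here only so the
 * sketch is self-contained. */
void kernel_init_free_pages(struct page *page, int numpages);

static void free_pages_prepare_sketch(struct page *page, unsigned int order)
{
	bool init = want_init_on_free();

	/*
	 * Mirror of the alloc path: when kasan_has_integrated_init()
	 * is true, the HW_TAGS poisoning pass also initializes the
	 * memory, so the explicit zeroing is skipped.
	 */
	if (init && !kasan_has_integrated_init())
		kernel_init_free_pages(page, 1 << order);
	kasan_free_pages(page, order, init);
}

The payoff in both directions is one traversal of the pages instead of
two: tagging/poisoning and zeroing are fused whenever the hardware
tag-based runtime is active.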