author		Andrey Konovalov <andreyknvl@google.com>	2022-03-24 18:10:07 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-24 19:06:46 -0700
commit		5b2c07138cbd8c0c415c6d3ff5b8040532024814 (patch)
tree		d25ba0a9488281a76e06edb110edf5a5e84a40a1 /mm/page_alloc.c
parent		kasan, page_alloc: deduplicate should_skip_kasan_poison (diff)
kasan, page_alloc: move tag_clear_highpage out of kernel_init_free_pages
Currently, kernel_init_free_pages() serves two purposes: it either only zeroes memory or zeroes both memory and memory tags via a different code path. As this function has only two callers, each using only one code path, this behaviour is confusing.

Pull the code that zeroes both memory and tags out of kernel_init_free_pages(). As a result of this change, the code in free_pages_prepare() starts to look complicated, but this is improved in the next few patches. Those improvements are not integrated into this patch to keep the diffs easier to read.

This patch makes no functional changes.

Link: https://lkml.kernel.org/r/7719874e68b23902629c7cf19f966c4fd5f57979.1643047180.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Acked-by: Marco Elver <elver@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
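For context, the helper whose "two purposes" the message describes looked roughly like this before the patch. This is a simplified sketch: the structure matches the removed lines in the diff below, while the zeroing loop body is reconstructed from the mm/page_alloc.c source of this kernel version rather than shown in the diff.

/*
 * Simplified sketch of the pre-patch helper: one function, two unrelated
 * code paths selected by the zero_tags flag.
 */
static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
{
	int i;

	if (zero_tags) {
		/* HW tag-based KASAN: zero memory and memory tags together. */
		for (i = 0; i < numpages; i++)
			tag_clear_highpage(page + i);
		return;
	}

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++) {
		u8 tag = page_kasan_tag(page + i);

		page_kasan_tag_reset(page + i);
		clear_highpage(page + i);
		page_kasan_tag_set(page + i, tag);
	}
	kasan_enable_current();
}

After the patch, only the memory-zeroing path remains in kernel_init_free_pages(); the tag-clearing loop moves to its single user, post_alloc_hook(), as the diff shows.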
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3e7bbb5dae41..0721ff0c90be 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1278,16 +1278,10 @@ static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
 	       PageSkipKASanPoison(page);
 }
 
-static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
+static void kernel_init_free_pages(struct page *page, int numpages)
 {
 	int i;
 
-	if (zero_tags) {
-		for (i = 0; i < numpages; i++)
-			tag_clear_highpage(page + i);
-		return;
-	}
-
 	/* s390's use of memset() could override KASAN redzones. */
 	kasan_disable_current();
 	for (i = 0; i < numpages; i++) {
@@ -1383,7 +1377,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		bool init = want_init_on_free();
 
 		if (init)
-			kernel_init_free_pages(page, 1 << order, false);
+			kernel_init_free_pages(page, 1 << order);
 		if (!skip_kasan_poison)
 			kasan_poison_pages(page, order, init);
 	}
@@ -2378,9 +2372,17 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 		bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
 
 		kasan_unpoison_pages(page, order, init);
-		if (init)
-			kernel_init_free_pages(page, 1 << order,
-					       gfp_flags & __GFP_ZEROTAGS);
+
+		if (init) {
+			if (gfp_flags & __GFP_ZEROTAGS) {
+				int i;
+
+				for (i = 0; i < 1 << order; i++)
+					tag_clear_highpage(page + i);
+			} else {
+				kernel_init_free_pages(page, 1 << order);
+			}
+		}
 	}
 
 	set_page_owner(page, order, gfp_flags);