| author | 2018-08-06 20:56:34 +0200 |
|---|---|
| committer | 2018-08-06 20:56:34 +0200 |
| commit | 315706049c343794ad0d3e5b6f6b60b900457b11 (patch) |
| tree | 0a1596a4b5843a6262d17b4c656715854784977d /mm/page_alloc.c |
| parent | x86/speculation: Support Enhanced IBRS on future CPUs (diff) |
| parent | x86/mm/init: Remove freed kernel image areas from alias mapping (diff) |
Merge branch 'x86/pti-urgent' into x86/pti
Integrate the PTI Global bit fixes which conflict with the 32bit PTI
support.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 20 |
1 file changed, 16 insertions, 4 deletions
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5d800d61ddb7..3222193c46c6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6383,7 +6383,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 	free_area_init_core(pgdat);
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
 /*
  * Only struct pages that are backed by physical memory are zeroed and
  * initialized by going through __init_single_page(). But, there are some
@@ -6421,7 +6421,7 @@ void __paginginit zero_resv_unavail(void)
 	if (pgcnt)
 		pr_info("Reserved but unavailable: %lld pages", pgcnt);
 }
-#endif /* CONFIG_HAVE_MEMBLOCK */
+#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 
@@ -6939,9 +6939,21 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
 	start = (void *)PAGE_ALIGN((unsigned long)start);
 	end = (void *)((unsigned long)end & PAGE_MASK);
 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
+		struct page *page = virt_to_page(pos);
+		void *direct_map_addr;
+
+		/*
+		 * 'direct_map_addr' might be different from 'pos'
+		 * because some architectures' virt_to_page()
+		 * work with aliases. Getting the direct map
+		 * address ensures that we get a _writeable_
+		 * alias for the memset().
+		 */
+		direct_map_addr = page_address(page);
 		if ((unsigned int)poison <= 0xFF)
-			memset(pos, poison, PAGE_SIZE);
-		free_reserved_page(virt_to_page(pos));
+			memset(direct_map_addr, poison, PAGE_SIZE);
+
+		free_reserved_page(page);
 	}
 
 	if (pages && s)
```
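The comment in the last hunk carries the reasoning behind the change: the address handed to free_reserved_area() may be an alias of the page (for example a kernel-image mapping) that is not writeable, while page_address() returns the direct-map address, which is. This matters here because the parent commit pulled in by this merge, "x86/mm/init: Remove freed kernel image areas from alias mapping", stops keeping the freed image areas mapped at the alias address, so poisoning through the old `pos` pointer would no longer be safe. Below is a minimal userspace sketch of the same situation, not kernel code: it assumes Linux with memfd_create(), and the read-only mapping stands in for the kernel-image alias while the writable mapping stands in for the direct map. All names are illustrative.

```c
/*
 * Userspace sketch of the aliasing problem (not kernel code): the same
 * backing memory mapped at two virtual addresses, one read-only and one
 * writable. Poisoning must go through the writable alias, just as
 * free_reserved_area() now memset()s page_address(page) rather than the
 * address it was handed. Requires Linux and memfd_create().
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* "Physical" backing memory shared by both aliases. */
	int fd = memfd_create("backing", 0);
	if (fd < 0 || ftruncate(fd, page) < 0)
		return 1;

	/* Read-only alias: stands in for the kernel-image mapping. */
	unsigned char *ro_alias = mmap(NULL, page, PROT_READ,
				       MAP_SHARED, fd, 0);

	/* Writable alias: stands in for the direct map. */
	unsigned char *rw_alias = mmap(NULL, page, PROT_READ | PROT_WRITE,
				       MAP_SHARED, fd, 0);
	if (ro_alias == MAP_FAILED || rw_alias == MAP_FAILED)
		return 1;

	/*
	 * memset(ro_alias, ...) would fault; writing the same bytes via
	 * the writable alias works and is visible through both mappings.
	 */
	memset(rw_alias, 0xcc, page);
	printf("ro_alias=%p rw_alias=%p first byte=0x%02x\n",
	       (void *)ro_alias, (void *)rw_alias, ro_alias[0]);

	munmap(ro_alias, page);
	munmap(rw_alias, page);
	close(fd);
	return 0;
}
```

The kernel patch follows the same pattern: it resolves the incoming pointer to a struct page with virt_to_page(), then takes page_address() to get the one alias guaranteed to be writeable before doing the poison memset() and freeing the page.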