author    Vasily Gorbik <gor@linux.ibm.com>  2019-08-19 23:19:00 +0200
committer Vasily Gorbik <gor@linux.ibm.com>  2019-08-26 12:51:18 +0200
commit    759d4899d905c2a491d34987366421aebca359de (patch)
tree      24a348f45afcab6e90b02c6c42f6482562a7dd3f  /arch/s390/boot/kaslr.c
parent    s390/mem_detect: provide single get_mem_detect_end (diff)
s390/kaslr: reserve memory for kasan usage
Sometimes the kernel fails to boot with:
"The Linux kernel failed to boot with the KernelAddressSanitizer:
out of memory during initialisation"
even with large amounts of memory, when both kaslr and kasan are enabled.

The problem is that the kasan initialization code requires 1/8 of physical
memory plus some more for page tables. To keep as much code as possible
instrumented, kasan avoids using memblock for memory allocations; instead
it uses a trivial memory allocator that simply chops memory off the end of
online physical memory. For that reason, when kaslr is enabled together
with kasan, avoid positioning the kernel in the upper memory region that
would be utilized during kasan initialization.

Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
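To make the estimate concrete before reading the patch, here is a back-of-the-envelope
sketch of the reservation arithmetic for a hypothetical 4 GiB machine. The standalone
program and its constants (4 KiB pages and 256-entry, 2 KiB page tables, standing in
for the kernel's PAGE_SIZE, _PAGE_ENTRIES and _PAGE_TABLE_SIZE on s390) are assumptions
for illustration, not kernel code:

#include <stdio.h>

/* Assumed s390 values, mirroring PAGE_SIZE, _PAGE_ENTRIES, _PAGE_TABLE_SIZE */
#define PAGE_SIZE	4096UL
#define PAGE_ENTRIES	256UL	/* ptes per 2 KiB page table */
#define PAGE_TABLE_SIZE	2048UL

int main(void)
{
	unsigned long memory_limit = 4UL << 30;	/* hypothetical 4 GiB machine */
	unsigned long kasan_needs;

	/* 1/8 of physical memory for shadow memory */
	kasan_needs = memory_limit / 8;
	/* double the combined pte size for paging structures */
	kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
		       PAGE_ENTRIES * PAGE_TABLE_SIZE * 2;
	printf("kasan reservation: %lu MiB\n", kasan_needs >> 20);
	printf("kaslr upper limit:  %lu MiB\n", (memory_limit - kasan_needs) >> 20);
	return 0;
}

Run as-is, this prints a 530 MiB reservation (512 MiB of shadow memory plus 18 MiB
of page-table headroom), so on such a machine kaslr would only consider kernel bases
below roughly 3.5 GiB.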
Diffstat
-rw-r--r--  arch/s390/boot/kaslr.c | 41 +++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 3bdd8132e56b..b4bb4ad15c5b 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -3,6 +3,7 @@
  * Copyright IBM Corp. 2019
  */
 #include <asm/mem_detect.h>
+#include <asm/pgtable.h>
 #include <asm/cpacf.h>
 #include <asm/timex.h>
 #include <asm/sclp.h>
@@ -89,8 +90,10 @@ static unsigned long get_random(unsigned long limit)
 
 unsigned long get_random_base(unsigned long safe_addr)
 {
+	unsigned long memory_limit = memory_end_set ? memory_end : 0;
 	unsigned long base, start, end, kernel_size;
 	unsigned long block_sum, offset;
+	unsigned long kasan_needs;
 	int i;
 
 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
@@ -99,14 +102,36 @@ unsigned long get_random_base(unsigned long safe_addr)
 	}
 	safe_addr = ALIGN(safe_addr, THREAD_SIZE);
 
+	if (IS_ENABLED(CONFIG_KASAN)) {
+		/*
+		 * Estimate kasan memory requirements, which it will reserve
+		 * at the very end of available physical memory. To estimate
+		 * that, we take into account that kasan would require
+		 * 1/8 of available physical memory (for shadow memory) +
+		 * creating page tables for the whole memory + shadow memory
+		 * region (1 + 1/8). To keep page table estimates simple, take
+		 * double the combined pte size.
+		 */
+		memory_limit = get_mem_detect_end();
+		if (memory_end_set && memory_limit > memory_end)
+			memory_limit = memory_end;
+
+		/* for shadow memory */
+		kasan_needs = memory_limit / 8;
+		/* for paging structures */
+		kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
+			       _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
+		memory_limit -= kasan_needs;
+	}
+
 	kernel_size = vmlinux.image_size + vmlinux.bss_size;
 	block_sum = 0;
 	for_each_mem_detect_block(i, &start, &end) {
-		if (memory_end_set) {
-			if (start >= memory_end)
+		if (memory_limit) {
+			if (start >= memory_limit)
 				break;
-			if (end > memory_end)
-				end = memory_end;
+			if (end > memory_limit)
+				end = memory_limit;
 		}
 		if (end - start < kernel_size)
 			continue;
@@ -124,11 +149,11 @@ unsigned long get_random_base(unsigned long safe_addr)
 	base = safe_addr;
 	block_sum = offset = 0;
 	for_each_mem_detect_block(i, &start, &end) {
-		if (memory_end_set) {
-			if (start >= memory_end)
+		if (memory_limit) {
+			if (start >= memory_limit)
 				break;
-			if (end > memory_end)
-				end = memory_end;
+			if (end > memory_limit)
+				end = memory_limit;
 		}
 		if (end - start < kernel_size)
 			continue;
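Both loops in the patch clamp each detected memory block against memory_limit in the
same way; since memory_limit stays 0 when neither kasan nor an explicit memory_end
limit is in effect, the if (memory_limit) test doubles as a "no limit" check. Below is
a minimal standalone sketch of that clamping; clamp_block, the block struct and the
sample numbers are invented here for illustration (the kernel does this inline while
iterating with for_each_mem_detect_block()):

#include <stdio.h>

/* Hypothetical stand-in for one entry of the mem_detect block list */
struct block { unsigned long start, end; };

/* Clamp a candidate block against memory_limit; 0 means "no limit". */
static int clamp_block(struct block *b, unsigned long memory_limit)
{
	if (memory_limit) {
		if (b->start >= memory_limit)
			return 0;	/* block lies entirely above the limit */
		if (b->end > memory_limit)
			b->end = memory_limit;	/* trim the part above the limit */
	}
	return 1;
}

int main(void)
{
	struct block b = { 3UL << 30, 4UL << 30 };	/* 3 GiB .. 4 GiB */
	unsigned long limit = (4UL << 30) - (530UL << 20); /* ~3.48 GiB */

	if (clamp_block(&b, limit))
		printf("usable: %#lx..%#lx\n", b.start, b.end);
	return 0;
}

With the ~3.48 GiB limit from the earlier example, the 3 GiB..4 GiB block is trimmed
rather than discarded, so its lower part still counts toward the randomization range.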