 arch/arm64/kernel/kaslr.c | 35 ++++++++++++-----------------------
 1 file changed, 12 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 47080c49cc7e..f0e6ab8abe9c 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -117,53 +117,42 @@ u64 __init kaslr_early_init(u64 dt_phys)
/*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable
* kernel image offset from the seed. Let's place the kernel in the
- * lower half of the VMALLOC area (VA_BITS - 2).
+ * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
+ * the lower and upper quarters to avoid colliding with other
+ * allocations.
* Even if we could randomize at page granularity for 16k and 64k pages,
* let's always round to 2 MB so we don't interfere with the ability to
* map using contiguous PTEs
*/
mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
- offset = seed & mask;
+ offset = BIT(VA_BITS - 3) + (seed & mask);
/* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48;
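
The new placement is easiest to see with concrete numbers. Below is a
standalone userspace sketch (not kernel code) of the two changed lines
above, assuming VA_BITS = 48; BIT(), SZ_2M and the example seed are
stand-ins for the kernel definitions. Per the comments in the diff, the
old code drew the offset from the lower half of a 2^47-byte area; the
new code lands it, still 2 MB aligned, in the middle half, i.e. in
[2^45, 3 * 2^45).

#include <stdio.h>
#include <stdint.h>

#define VA_BITS	48
#define SZ_2M	(1ULL << 21)
#define BIT(n)	(1ULL << (n))

int main(void)
{
	uint64_t seed = 0x1234567890abcdefULL;	/* arbitrary example seed */

	/* VA_BITS - 2 bits of randomness, truncated to 2 MB granularity */
	uint64_t mask = (BIT(VA_BITS - 2) - 1) & ~(SZ_2M - 1);

	/* BIT(VA_BITS - 3) skips the lower quarter of the area */
	uint64_t offset = BIT(VA_BITS - 3) + (seed & mask);

	printf("offset = 0x%016llx (2 MB aligned: %s)\n",
	       (unsigned long long)offset,
	       (offset & (SZ_2M - 1)) ? "no" : "yes");
	printf("window = [0x%llx, 0x%llx)\n",
	       (unsigned long long)BIT(VA_BITS - 3),
	       (unsigned long long)(BIT(VA_BITS - 3) + BIT(VA_BITS - 2)));
	return 0;
}
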
- /*
- * The kernel Image should not extend across a 1GB/32MB/512MB alignment
- * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
- * happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
- *
- * NOTE: The references to _text and _end below will already take the
- * modulo offset (the physical displacement modulo 2 MB) into
- * account, given that the physical placement is controlled by
- * the loader, and will not change as a result of the virtual
- * mapping we choose.
- */
- if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
- (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
- offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);
-
if (IS_ENABLED(CONFIG_KASAN))
/*
* KASAN does not expect the module region to intersect the
* vmalloc region, since shadow memory is allocated for each
* module at load time, whereas the vmalloc region is shadowed
* by KASAN zero pages. So keep modules out of the vmalloc
- * region if KASAN is enabled.
+ * region if KASAN is enabled, and put the kernel well within
+ * 4 GB of the module region.
*/
- return offset;
+ return offset % SZ_2G;
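
The % SZ_2G clamp is worth a quick numbers check. With KASAN enabled,
modules stay in their default window below the kernel's link-time
address, so capping the random offset below 2 GB keeps every kernel
symbol well inside the +/- 4 GB range of an ADRP instruction. A minimal
userspace sketch of that bound, assuming a 128 MB MODULES_VSIZE and a
generously oversized image (both stand-ins, not values taken from this
diff):

#include <assert.h>
#include <stdint.h>

#define SZ_2G		0x80000000ULL
#define SZ_4G		0x100000000ULL
#define MODULES_VSIZE	(128ULL << 20)	/* assumed 128 MB module window */

int main(void)
{
	uint64_t offset = SZ_2G - 1;		/* worst case after "% SZ_2G" */
	uint64_t image_size = 512ULL << 20;	/* generous upper bound */

	/* farthest kernel byte, measured from the bottom of the module region */
	uint64_t span = MODULES_VSIZE + offset + image_size;

	assert(span < SZ_4G);	/* well within ADRP's +/- 4 GB reach */
	return 0;
}
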
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
/*
- * Randomize the module region independently from the core
- * kernel. This prevents modules from leaking any information
+ * Randomize the module region over a 4 GB window covering the
+ * kernel. This reduces the risk of modules leaking information
* about the address of the kernel itself, but results in
* branches between modules and the core kernel that are
* resolved via PLTs. (Branches between modules will be
* resolved normally.)
*/
- module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE;
- module_alloc_base = VMALLOC_START;
+ module_range = SZ_4G - (u64)(_end - _stext);
+ module_alloc_base = max((u64)_end + offset - SZ_4G,
+ (u64)MODULES_VADDR);
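
A userspace sketch of the window arithmetic above, with illustrative
addresses standing in for _stext, _end and MODULES_VADDR (_stext and
_end are taken as already displaced by the KASLR offset). It checks the
invariant the 4 GB window is meant to give: wherever the module base
lands in [module_alloc_base, module_alloc_base + module_range), the
whole kernel image stays within 4 GB of it.

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define SZ_4G	0x100000000ULL

int main(void)
{
	/* illustrative stand-ins, not real kernel addresses */
	uint64_t modules_vaddr = 0xffff000008000000ULL;	/* MODULES_VADDR */
	uint64_t stext = 0xffff000050200000ULL;		/* _stext + offset */
	uint64_t end   = stext + (20ULL << 20);		/* 20 MB image */

	/* mirror the two assignments from the diff */
	uint64_t module_range = SZ_4G - (end - stext);
	uint64_t base = end - SZ_4G;
	uint64_t module_alloc_base = base > modules_vaddr ? base : modules_vaddr;

	/* even the lowest base in the window reaches the end of the image */
	assert(end - module_alloc_base <= SZ_4G);

	printf("module window: [0x%llx, 0x%llx)\n",
	       (unsigned long long)module_alloc_base,
	       (unsigned long long)(module_alloc_base + module_range));
	return 0;
}

The max() clamp matters when the randomized image sits less than 4 GB
above MODULES_VADDR, as in this example: the window is then pinned to
the bottom of the module space rather than extending below it.
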
} else {
/*
* Randomize the module region by setting module_alloc_base to