path: root/arch/arm64/kernel/kaslr.c
author	Ard Biesheuvel <ard.biesheuvel@linaro.org>	2016-04-18 17:09:47 +0200
committer	Will Deacon <will.deacon@arm.com>	2016-04-26 12:23:28 +0100
commit	08cdac619c81b3fa8cd73aeed2330ffe0a0b73ca (patch)
tree	70f8acd4853280472d0fd02d5145997b0c9be886 /arch/arm64/kernel/kaslr.c
parent	arm64: don't map TEXT_OFFSET bytes below the kernel if we can avoid it (diff)
arm64: relocatable: deal with physically misaligned kernel images
When booting a relocatable kernel image, there is no practical reason to refuse an image whose load address is not exactly TEXT_OFFSET bytes above a 2 MB aligned base address, as long as the physical and virtual misalignment with respect to the swapper block size are equal, and are both aligned to THREAD_SIZE.

Since the virtual misalignment is under our control when we first enter the kernel proper, we can simply choose its value to be equal to the physical misalignment.

So treat the misalignment of the physical load address as the initial KASLR offset, and fix up the remaining code to deal with that.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
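To make the offset arithmetic concrete, below is a minimal standalone C sketch of the boundary check this patch extends. All addresses are invented, the 2 MB swapper block and 30-bit table shift assume a 4 KB granule configuration, and the masking that kaslr_early_init() applies after the adjustment is only noted in a comment; this is an illustration, not the kernel's early-boot code.

#include <stdint.h>
#include <stdio.h>

/* Assumed values for a 4 KB granule kernel: 2 MB swapper blocks, 1 GB table entries. */
#define SWAPPER_BLOCK_SIZE   (2ULL << 20)
#define SWAPPER_TABLE_SHIFT  30

int main(void)
{
	/*
	 * Hypothetical physically misaligned load address: its remainder below
	 * the swapper block size plays the role of the "modulo offset" that
	 * this patch passes into kaslr_early_init().
	 */
	uint64_t load_phys = 0x40283000ULL;
	uint64_t modulo_offset = load_phys & (SWAPPER_BLOCK_SIZE - 1);	/* 0x83000 */

	/* Hypothetical link-time addresses and candidate KASLR offset. */
	uint64_t text   = 0xffff000008080000ULL;
	uint64_t end    = 0xffff000009150000ULL;
	uint64_t offset = 0x37e00000ULL;

	/*
	 * Same test as the patched check: once the physical misalignment is
	 * folded in, _text and _end must still fall inside the same swapper
	 * table entry; otherwise push the offset past the boundary by the
	 * image size (kaslr.c additionally masks the adjusted offset).
	 */
	if (((text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
	    ((end  + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
		offset += end - text;

	printf("modulo_offset=%#llx, adjusted offset=%#llx\n",
	       (unsigned long long)modulo_offset,
	       (unsigned long long)offset);
	return 0;
}

With these made-up numbers the image would straddle a 1 GB table boundary, so the candidate offset is bumped by the image size, mirroring the behaviour of the patched check.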
Diffstat (limited to 'arch/arm64/kernel/kaslr.c')
-rw-r--r--  arch/arm64/kernel/kaslr.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 582983920054..b05469173ba5 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -74,7 +74,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
  * containing function pointers) to be reinitialized, and zero-initialized
  * .bss variables will be reset to 0.
  */
-u64 __init kaslr_early_init(u64 dt_phys)
+u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 {
 	void *fdt;
 	u64 seed, offset, mask, module_range;
@@ -132,8 +132,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
 	 * happens, increase the KASLR offset by the size of the kernel image.
 	 */
-	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
 		offset = (offset + (u64)(_end - _text)) & mask;
 
 	if (IS_ENABLED(CONFIG_KASAN))