author		Neeraj Upadhyay <neeraju@codeaurora.org>	2017-03-22 17:08:25 +0530
committer	Will Deacon <will.deacon@arm.com>	2017-03-22 15:43:11 +0000
commit		afd0e5a876703accb95894f23317a13e2c49b523 (patch)
tree		083b96df6a2a39182800e38a23f6f008cdc5d8a7 /arch
parent		713cc9df6473f0cc8d699987d990482d432c0679 (diff)
arm64: kaslr: Fix up the kernel image alignment
If the kernel image extends across an alignment boundary, the existing
code increases the KASLR offset by the size of the kernel image, and
the offset is masked after the correction. There are cases where, after
masking, the kernel image still extends across the boundary. This
eventually results in only a 2MB block getting mapped while creating
the page tables, which causes data aborts when unmapped regions are
accessed during the second relocation (with the KASLR offset) in
__primary_switch. To fix this problem, round the kernel image size up
to the swapper block size before adding it as the correction.

For example, consider the case below, where the kernel image still
crosses the 1GB alignment boundary after the offset is masked; rounding
up the kernel image size fixes this.

SWAPPER_TABLE_SHIFT = 30
Swapper using section maps with section size 2MB.
CONFIG_PGTABLE_LEVELS = 3
VA_BITS = 39

_text  : 0xffffff8008080000
_end   : 0xffffff800aa1b000
offset : 0x1f35600000
mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1)

(_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7c
(_end + offset) >> SWAPPER_TABLE_SHIFT  = 0x3fffffe7d

offset after existing correction (before mask) = 0x1f37f9b000
(_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d
(_end + offset) >> SWAPPER_TABLE_SHIFT  = 0x3fffffe7d

offset (after mask) = 0x1f37e00000
(_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7c
(_end + offset) >> SWAPPER_TABLE_SHIFT  = 0x3fffffe7d

new offset w/ rounding up = 0x1f38000000
(_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d
(_end + offset) >> SWAPPER_TABLE_SHIFT  = 0x3fffffe7d

Fixes: f80fb3a3d508 ("arm64: add support for kernel ASLR")
Cc: <stable@vger.kernel.org>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Neeraj Upadhyay <neeraju@codeaurora.org>
Signed-off-by: Srinivas Ramana <sramana@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
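The arithmetic above can be checked outside the kernel. Below is a
minimal standalone userspace sketch, not kernel code: the constants
mirror the worked example, modulo_offset is taken as 0 (as the example
implies), and round_up() is a local stand-in for the kernel macro.

#include <stdio.h>
#include <stdint.h>

#define VA_BITS			39
#define SZ_2M			0x200000ULL
#define SWAPPER_TABLE_SHIFT	30
#define SWAPPER_BLOCK_SIZE	SZ_2M	/* section maps, 4KB granule */

/* Local stand-in for the kernel's round_up(); y must be a power of two. */
static uint64_t round_up(uint64_t x, uint64_t y)
{
	return ((x - 1) | (y - 1)) + 1;
}

/* Does [text, end) straddle a SWAPPER_TABLE_SHIFT boundary at this offset? */
static int crosses(uint64_t text, uint64_t end, uint64_t offset)
{
	return ((text + offset) >> SWAPPER_TABLE_SHIFT) !=
	       ((end  + offset) >> SWAPPER_TABLE_SHIFT);
}

int main(void)
{
	uint64_t text    = 0xffffff8008080000ULL;
	uint64_t end     = 0xffffff800aa1b000ULL;
	uint64_t offset  = 0x1f35600000ULL;
	uint64_t mask    = (((uint64_t)1 << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
	uint64_t kimg_sz = end - text;

	/* Old correction: add the raw image size, then mask. */
	uint64_t old_off = (offset + kimg_sz) & mask;
	/* Fixed correction: round the image size up to SWAPPER_BLOCK_SIZE first. */
	uint64_t new_off = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE)) & mask;

	printf("old: 0x%llx crosses=%d\n", (unsigned long long)old_off,
	       crosses(text, end, old_off));
	printf("new: 0x%llx crosses=%d\n", (unsigned long long)new_off,
	       crosses(text, end, new_off));
	return 0;
}

Running it prints old: 0x1f37e00000 crosses=1 and new: 0x1f38000000
crosses=0, matching the worked example above.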
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/kernel/kaslr.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24ef628c..d7e90d97f5c4 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image.
+	 * happens, increase the KASLR offset by the size of the kernel image
+	 * rounded up by SWAPPER_BLOCK_SIZE.
 	 */
 	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
-		offset = (offset + (u64)(_end - _text)) & mask;
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+		u64 kimg_sz = _end - _text;
+		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+				& mask;
+	}
 
 	if (IS_ENABLED(CONFIG_KASAN))
 		/*
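Why the rounding works: mask clears the low bits below SZ_2M, so a bump
that is not a multiple of 2MB can be partially shaved off by the masking
step, leaving the image still straddling the boundary. Rounding kimg_sz
up to SWAPPER_BLOCK_SIZE keeps the bump 2MB-aligned and therefore intact
after masking. A minimal sketch of that invariant, assuming the kernel's
power-of-two round_up() semantics:

#include <assert.h>
#include <stdint.h>

#define SZ_2M			0x200000ULL
#define SWAPPER_BLOCK_SIZE	SZ_2M	/* section maps, 4KB granule */

/* Matches the kernel's round_up() for power-of-two alignment. */
static uint64_t round_up(uint64_t x, uint64_t y)
{
	return ((x - 1) | (y - 1)) + 1;
}

int main(void)
{
	uint64_t kimg_sz = 0x299b000ULL;	/* _end - _text from the example */
	uint64_t bump = round_up(kimg_sz, SWAPPER_BLOCK_SIZE);

	/* The rounded bump (0x2a00000) is a multiple of 2MB, so masking
	 * the offset with ~(SZ_2M - 1) afterwards cannot shave it down. */
	assert(bump == 0x2a00000ULL);
	assert((bump & (SZ_2M - 1)) == 0);
	return 0;
}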