author     Mark Rutland <mark.rutland@arm.com>    2017-07-21 14:25:33 +0100
committer  Mark Rutland <mark.rutland@arm.com>    2017-08-15 18:36:04 +0100
commit     e3067861ba6650a566a6273738c23c956ad55c02 (patch)
tree       95c523e8bbb75e883b02e659481fa9557f2e56e6    /arch/arm64/include/asm/memory.h
parent     f60fe78f133243e6de0f05fdefc3ed2f3c5085ca (diff)
arm64: add basic VMAP_STACK support
This patch enables arm64 to be built with vmap'd task and IRQ stacks.

As vmap'd stacks are mapped at page granularity, stacks must be a multiple of
PAGE_SIZE. This means that a 64K page kernel must use stacks of at least 64K
in size.

To minimize the increase in Image size, IRQ stacks are dynamically allocated
at boot time, rather than embedding the boot CPU's IRQ stack in the kernel
image.

This patch was co-authored by Ard Biesheuvel and Mark Rutland.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
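The page-size constraint described above can be illustrated with a small
standalone sketch (not part of the patch; PAGE_SHIFT values of 12 and 16 are
assumed for 4K and 64K page kernels) that mirrors the THREAD_SHIFT selection
added to memory.h in the diff below:

/*
 * Standalone illustration of the THREAD_SHIFT selection under
 * CONFIG_VMAP_STACK. PAGE_SHIFT values are assumed: 12 for 4K pages,
 * 16 for 64K pages.
 */
#include <stdio.h>

#define MIN_THREAD_SHIFT	14

static unsigned long thread_size(int page_shift, int vmap_stack)
{
	int thread_shift = MIN_THREAD_SHIFT;

	/* VMAP'd stacks are page-granular, so round up to a whole page. */
	if (vmap_stack && thread_shift < page_shift)
		thread_shift = page_shift;

	return 1UL << thread_shift;
}

int main(void)
{
	printf("4K pages:  THREAD_SIZE = %lu\n", thread_size(12, 1)); /* 16384 */
	printf("64K pages: THREAD_SIZE = %lu\n", thread_size(16, 1)); /* 65536 */
	return 0;
}

With 4K pages the stack stays at the historical 16K; with 64K pages it is
rounded up to a single 64K page, which is why the stacks are allocated
dynamically rather than embedded in the Image.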
Diffstat (limited to 'arch/arm64/include/asm/memory.h')
-rw-r--r--    arch/arm64/include/asm/memory.h    23
1 file changed, 22 insertions(+), 1 deletion(-)
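The boot-time IRQ stack allocation mentioned in the commit message is not part
of this diffstat, which is limited to memory.h. As a rough, hypothetical sketch
of how such a per-cpu allocation from the vmalloc area could look (the names
init_irq_stacks and irq_stack_ptr are illustrative), using __vmalloc_node_range()
so each stack is mapped at page granularity and aligned to THREAD_ALIGN:

/* Illustrative sketch only; not the hunks from this patch. */
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <asm/memory.h>

static DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);

static void init_irq_stacks(void)
{
	int cpu;
	unsigned long *p;

	for_each_possible_cpu(cpu) {
		/*
		 * Allocate each IRQ stack from the vmalloc area, aligned to
		 * THREAD_ALIGN so the same overflow check works for IRQ
		 * stacks as for task stacks.
		 */
		p = __vmalloc_node_range(IRQ_STACK_SIZE, THREAD_ALIGN,
					 VMALLOC_START, VMALLOC_END,
					 THREADINFO_GFP, PAGE_KERNEL, 0,
					 cpu_to_node(cpu),
					 __builtin_return_address(0));
		per_cpu(irq_stack_ptr, cpu) = p;
	}
}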
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 7fa6ad48d574..c5cd2c599b24 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -102,7 +102,17 @@
#define KASAN_SHADOW_SIZE (0)
#endif
-#define THREAD_SHIFT 14
+#define MIN_THREAD_SHIFT 14
+
+/*
+ * VMAP'd stacks are allocated at page granularity, so we must ensure that such
+ * stacks are a multiple of page size.
+ */
+#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#define THREAD_SHIFT PAGE_SHIFT
+#else
+#define THREAD_SHIFT MIN_THREAD_SHIFT
+#endif
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
@@ -110,6 +120,17 @@
#define THREAD_SIZE (UL(1) << THREAD_SHIFT)
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
#define IRQ_STACK_SIZE THREAD_SIZE
/*
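The overflow check described in the new comment can be demonstrated with a
small userspace sketch (assuming THREAD_SHIFT = 14, i.e. 4K pages): because
each THREAD_SIZE stack starts on a 2 * THREAD_SIZE boundary, bit THREAD_SHIFT
of sp is clear while sp stays on the stack, and becomes set once sp runs off
the bottom.

/*
 * Userspace sketch of the sp & (1 << THREAD_SHIFT) overflow check,
 * assuming THREAD_SHIFT = 14 (4K pages, 16K stacks).
 */
#include <stdio.h>

#define THREAD_SHIFT	14
#define THREAD_SIZE	(1UL << THREAD_SHIFT)
#define THREAD_ALIGN	(2 * THREAD_SIZE)

static int sp_overflowed(unsigned long sp)
{
	return (sp & (1UL << THREAD_SHIFT)) != 0;
}

int main(void)
{
	unsigned long base = 16 * THREAD_ALIGN;	/* 2 * THREAD_SIZE aligned */

	printf("near the top:    %d\n", sp_overflowed(base + THREAD_SIZE - 16)); /* 0 */
	printf("mid-stack:       %d\n", sp_overflowed(base + THREAD_SIZE / 2));  /* 0 */
	printf("below the stack: %d\n", sp_overflowed(base - 16));               /* 1 */
	return 0;
}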