author     Will Deacon <will.deacon@arm.com>   2016-01-05 15:36:59 +0000
committer  Will Deacon <will.deacon@arm.com>   2016-01-05 15:43:10 +0000
commit     39b5be9b4233a9f212b98242bddf008f379b5122
tree       a43438246dfcc4d0deca54c2823904faac99f94b
parent     f930896967fa3f9ab16a6f87267b92798308d48f
arm64: mm: move pgd_cache initialisation to pgtable_cache_init
Initialising the support for EFI runtime services requires us to
allocate a pgd off the back of an early_initcall. On systems where the
PGD_SIZE is smaller than PAGE_SIZE (e.g. 64k pages and 48-bit VA), the
pgd_cache isn't initialised at this stage, and we panic with a NULL
dereference during boot:

  Unable to handle kernel NULL pointer dereference at virtual address 00000000

    __create_mapping.isra.5+0x84/0x350
    create_pgd_mapping+0x20/0x28
    efi_create_mapping+0x5c/0x6c
    arm_enable_runtime_services+0x154/0x1e4
    do_one_initcall+0x8c/0x190
    kernel_init_freeable+0x84/0x1ec
    kernel_init+0x10/0xe0
    ret_from_fork+0x10/0x50

This patch fixes the problem by initialising the pgd_cache earlier, in
the pgtable_cache_init callback, which sounds suspiciously like what it
was intended for.

Reported-by: Dennis Chen <dennis.chen@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
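For context, the allocation path that hits the NULL cache lives in the same
file. Below is a rough sketch of pgd_alloc(), paraphrased from this era's
arch/arm64/mm/pgd.c (not the literal code; treat PGALLOC_GFP and the exact
shape as assumptions). It shows why an uninitialised pgd_cache blows up as
soon as the EFI early_initcall asks for a pgd on a PGD_SIZE != PAGE_SIZE
configuration:

/* Rough sketch of the allocation side, paraphrased from this era's pgd.c. */
static struct kmem_cache *pgd_cache;

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	if (PGD_SIZE == PAGE_SIZE)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);
	else
		/* pgd_cache is still NULL if this runs before the old core_initcall */
		return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}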
-rw-r--r--  arch/arm64/include/asm/pgtable.h   3
-rw-r--r--  arch/arm64/mm/pgd.c               12
2 files changed, 8 insertions, 7 deletions
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 35a318c2fd87..a87e964d2791 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -676,7 +676,8 @@ extern int kern_addr_valid(unsigned long addr);
 
 #include <asm-generic/pgtable.h>
 
-#define pgtable_cache_init() do { } while (0)
+void pgd_cache_init(void);
+#define pgtable_cache_init	pgd_cache_init
 
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index cb3ba1b812e7..ae11d4e03d0e 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -46,14 +46,14 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 		kmem_cache_free(pgd_cache, pgd);
 }
 
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
 {
+	if (PGD_SIZE == PAGE_SIZE)
+		return;
+
 	/*
 	 * Naturally aligned pgds required by the architecture.
 	 */
-	if (PGD_SIZE != PAGE_SIZE)
-		pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
-					      SLAB_PANIC, NULL);
-	return 0;
+	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+				      SLAB_PANIC, NULL);
 }
-core_initcall(pgd_cache_init);
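For reference, the pgtable_cache_init hook is early enough because it is
reached from mm_init() in start_kernel(), well before any initcall level
runs. A simplified sketch of that path, paraphrased from the generic code of
this era (the exact call sites are an assumption):

/*
 * Simplified boot ordering (sketch, not literal init/main.c):
 *
 *   start_kernel()
 *     -> mm_init()
 *       -> pgtable_init()              // include/linux/mm.h
 *         -> pgtable_cache_init()      // now pgd_cache_init() on arm64
 *     ...
 *     -> kernel_init() -> kernel_init_freeable()
 *       -> early_initcall level        // arm_enable_runtime_services()
 *       -> core_initcall level         // where pgd_cache used to be created
 */
static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();	/* resolves to pgd_cache_init() after this patch */
}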