author     Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 17:25:42 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 17:25:42 -0700
commit     e96ec8cf9ca12a8d6b3b896a2eccd4b92a1893ab (patch)
tree       05f944a09bd3f30a04ef30f48e29f10ca4be397b /arch/x86
parent     c813e8c9dff344a3b46bc9bba8aff5a7ebbc67e7 (diff)
parent     2b32ab031e82a109e2c5b0d30ce563db0fe286b4 (diff)
Merge tag 'x86-mm-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
 "The biggest change is to not sync the vmalloc and ioremap ranges for
  x86-64 anymore."

* tag 'x86-mm-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/64: Make sync_global_pgds() static
  x86/mm/64: Do not sync vmalloc/ioremap mappings
  x86/mm: Pre-allocate P4D/PUD pages for vmalloc area
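As background, the sync being switched off here is the generic page-table synchronization hook that x86-64 previously opted into. A minimal sketch of that mechanism follows (paraphrased, not copied from mm/vmalloc.c; the function name and the elided population step are illustrative, while ARCH_PAGE_TABLE_SYNC_MASK, the PGTBL_*_MODIFIED bits and arch_sync_kernel_mappings() are the identifiers whose x86-64 definitions this merge removes):

/*
 * Sketch: architectures publish in ARCH_PAGE_TABLE_SYNC_MASK which
 * page-table levels must be kept in sync across all page-tables, and
 * the vmalloc/ioremap mapping code calls arch_sync_kernel_mappings()
 * whenever it modified one of those levels.
 */
static int example_map_kernel_range(unsigned long start, unsigned long size)
{
	pgtbl_mod_mask mask = 0;

	/*
	 * ... populate the page-table levels for [start, start + size),
	 * setting PGTBL_PGD_MODIFIED / PGTBL_P4D_MODIFIED in 'mask'
	 * whenever a new top-level entry had to be installed ...
	 */

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, start + size);

	return 0;
}

With the mask and the x86-64 hook removed (see the hunks below), this call does nothing on x86-64; the pre-allocation added to init_64.c makes the sync unnecessary by ensuring the shared levels already exist in the kernel page-table that every process's page-table is copied from.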
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/pgtable_64.h        |  2
-rw-r--r--  arch/x86/include/asm/pgtable_64_types.h  |  2
-rw-r--r--  arch/x86/mm/init_64.c                    | 59
3 files changed, 53 insertions, 10 deletions
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index d2af8c48ba50..56d0399a0cd1 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -168,8 +168,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
native_set_pgd(pgd, native_make_pgd(0));
}
-extern void sync_global_pgds(unsigned long start, unsigned long end);
-
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 8f63efb2a2cc..52e5f5f2240d 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -159,6 +159,4 @@ extern unsigned int ptrs_per_p4d;
#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
-#define ARCH_PAGE_TABLE_SYNC_MASK (pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)
-
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
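With this x86-64 override removed, the architecture falls back to the generic default mask, which is empty, so no page-table level is reported as needing synchronization. Paraphrasing the generic pgtable header (not part of this diff), the fallback is along these lines:

#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif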
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dbae185511cd..e65b96f381a7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -209,7 +209,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end)
* When memory was added make sure all the processes MM have
* suitable PGD entries in the local PGD level page.
*/
-void sync_global_pgds(unsigned long start, unsigned long end)
+static void sync_global_pgds(unsigned long start, unsigned long end)
{
if (pgtable_l5_enabled())
sync_global_pgds_l5(start, end);
@@ -217,11 +217,6 @@ void sync_global_pgds(unsigned long start, unsigned long end)
sync_global_pgds_l4(start, end);
}
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
-{
- sync_global_pgds(start, end);
-}
-
/*
* NOTE: This function is marked __ref because it calls __init function
* (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
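With the x86-64 implementation deleted in the hunk above, any caller of arch_sync_kernel_mappings() now resolves to the weak stub in generic mm code, which does nothing. Paraphrased, that stub is simply:

void __weak arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
}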
@@ -1238,6 +1233,56 @@ static void __init register_page_bootmem_info(void)
#endif
}
+/*
+ * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
+ * Only the level which needs to be synchronized between all page-tables is
+ * allocated because the synchronization can be expensive.
+ */
+static void __init preallocate_vmalloc_pages(void)
+{
+	unsigned long addr;
+	const char *lvl;
+
+	for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		pgd_t *pgd = pgd_offset_k(addr);
+		p4d_t *p4d;
+		pud_t *pud;
+
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_none(*p4d)) {
+			/* Can only happen with 5-level paging */
+			p4d = p4d_alloc(&init_mm, pgd, addr);
+			if (!p4d) {
+				lvl = "p4d";
+				goto failed;
+			}
+		}
+
+		if (pgtable_l5_enabled())
+			continue;
+
+		pud = pud_offset(p4d, addr);
+		if (pud_none(*pud)) {
+			/* Ends up here only with 4-level paging */
+			pud = pud_alloc(&init_mm, p4d, addr);
+			if (!pud) {
+				lvl = "pud";
+				goto failed;
+			}
+		}
+	}
+
+	return;
+
+failed:
+
+	/*
+	 * The pages have to be there now or they will be missing in
+	 * process page-tables later.
+	 */
+	panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
+}
+
void __init mem_init(void)
{
pci_iommu_alloc();
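For a rough sense of what preallocate_vmalloc_pages() costs, the sketch below works through the arithmetic under the conventional 4-level x86-64 layout (a 32 TiB vmalloc area and 512 GiB covered per PGD entry); these constants are assumptions for illustration, not values taken from this diff, and the program is ordinary userspace C rather than kernel code:

#include <stdio.h>

int main(void)
{
	unsigned long long vmalloc_size = 32ULL << 40;	/* assumed 32 TiB vmalloc area */
	unsigned long long pgdir_size   = 512ULL << 30;	/* assumed 512 GiB per PGD entry */
	unsigned long long entries = vmalloc_size / pgdir_size;

	/* One 4 KiB page-table page is pre-allocated per covered PGD entry. */
	printf("%llu PGD entries -> %llu KiB of pre-allocated page-table pages\n",
	       entries, entries * 4);
	return 0;
}

On those assumptions this prints "64 PGD entries -> 256 KiB of pre-allocated page-table pages", i.e. the one-time boot cost of skipping the runtime sync is a few hundred kilobytes of page-table memory.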
@@ -1261,6 +1306,8 @@ void __init mem_init(void)
if (get_gate_vma(&init_mm))
kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
+ preallocate_vmalloc_pages();
+
mem_init_print_info(NULL);
}