author:    Bibo Mao <maobibo@loongson.cn>          2023-09-06 22:53:10 +0800
committer: Huacai Chen <chenhuacai@loongson.cn>    2023-09-06 22:53:10 +0800
commit:    0921af6ccfb37dc2d6aefcf744333c14e7ca739d (patch)
tree:      4bdb59dc579e6b2a98fe02142261a629cb22469d /arch
parent:    2bb20d2926a8ea991386315aa8017990ef7beb6a (diff)
LoongArch: Use static defined zero page rather than allocated
On LoongArch systems, only one page is needed for the zero page (there are
no cache synonyms), and there is no colored zero page, so zero_page_mask is
useless and the macro __HAVE_COLOR_ZERO_PAGE is unnecessary.

Like other popular architectures, it is simpler to define the zero page
statically in the kernel BSS segment than to allocate it dynamically.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
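
The net effect of the patch, condensed from the diff below: the zero page
becomes a page-aligned array in the kernel's BSS, and ZERO_PAGE() resolves to
that single page regardless of the virtual address, since LoongArch needs no
cache-color selection. A minimal sketch of the resulting definitions (lines
taken from the patch itself; kernel context such as PAGE_SIZE,
__page_aligned_bss and virt_to_page() is assumed):

/* New zero-page definition: one zeroed, page-aligned array in BSS. */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

/* ZERO_PAGE() now ignores vaddr: every caller gets the same page. */
#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)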
Diffstat (limited to 'arch')
-rw-r--r--  arch/loongarch/include/asm/mmzone.h  |  2
-rw-r--r--  arch/loongarch/include/asm/pgtable.h |  7
-rw-r--r--  arch/loongarch/kernel/numa.c         |  1
-rw-r--r--  arch/loongarch/mm/init.c             | 28
4 files changed, 3 insertions(+), 35 deletions(-)
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
index fe67d0b4b33d..2b9a90727e19 100644
--- a/arch/loongarch/include/asm/mmzone.h
+++ b/arch/loongarch/include/asm/mmzone.h
@@ -13,6 +13,4 @@ extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)	(node_data[(nid)])
 
-extern void setup_zero_pages(void);
-
 #endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 0ac6afa4a825..7699af049443 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -70,12 +70,9 @@ struct vm_area_struct;
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page;
-extern unsigned long zero_page_mask;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
-#define ZERO_PAGE(vaddr) \
-	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
-#define __HAVE_COLOR_ZERO_PAGE
+#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
 
 /*
  * TLB refill handlers may also map the vmalloc area into xkvrange.
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index cb00804826f7..c7d33c489e04 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -438,7 +438,6 @@ void __init mem_init(void)
 {
 	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
 	memblock_free_all();
-	setup_zero_pages();	/* This comes from node 0 */
 }
 
 int pcibus_to_node(struct pci_bus *bus)
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 0f1dee285da2..f3fe8c06ba4d 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -35,33 +35,8 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 
-/*
- * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. Since page is never written to after the initialization we
- * don't have to care about aliases on other CPUs.
- */
-unsigned long empty_zero_page, zero_page_mask;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(zero_page_mask);
-
-void setup_zero_pages(void)
-{
-	unsigned int order, i;
-	struct page *page;
-
-	order = 0;
-
-	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!empty_zero_page)
-		panic("Oh boy, that early out of memory?");
-
-	page = virt_to_page((void *)empty_zero_page);
-	split_page(page, order);
-	for (i = 0; i < (1 << order); i++, page++)
-		mark_page_reserved(page);
-
-	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
-}
 
 void copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
@@ -106,7 +81,6 @@ void __init mem_init(void)
 
 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 	memblock_free_all();
-	setup_zero_pages();	/* Setup zeroed pages. */
 }
 
 #endif /* !CONFIG_NUMA */
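
For context on the removed machinery: on architectures with virtually-indexed
caches, ZERO_PAGE(vaddr) must hand back a zero page whose cache color matches
vaddr, and zero_page_mask selects among 2^order pre-zeroed pages. Below is a
standalone sketch of that arithmetic (a hypothetical user-space program,
purely illustrative; a PAGE_SHIFT of 12 is assumed). With order = 0, the only
value the removed LoongArch code ever used, the mask collapses to 0 and every
vaddr picks the same page, which is why the static single-page definition
suffices.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	/* order 0 is what the removed setup_zero_pages() always used (one
	 * page); order 3 illustrates the "up to 8 pages" colored case that
	 * the deleted comment in mm/init.c described. */
	for (unsigned int order = 0; order <= 3; order += 3) {
		unsigned long zero_page_mask =
			((PAGE_SIZE << order) - 1) & PAGE_MASK;
		/* Offset the old ZERO_PAGE(vaddr) added to empty_zero_page: */
		unsigned long off = 0x7000UL & zero_page_mask;
		printf("order=%u mask=%#lx offset(vaddr=0x7000)=%#lx\n",
		       order, zero_page_mask, off);
	}
	return 0;
}

Compiled with any C99 compiler, this prints mask 0 and offset 0 for order 0
(one shared page) and mask 0x7000 and offset 0x7000 for order 3 (the vaddr's
color bits select one of eight pages).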