Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/Kconfig | 23
-rw-r--r-- | arch/ia64/configs/bigsur_defconfig | 1
-rw-r--r-- | arch/ia64/include/asm/meminit.h | 11
-rw-r--r-- | arch/ia64/include/asm/module.h | 6
-rw-r--r-- | arch/ia64/include/asm/page.h | 25
-rw-r--r-- | arch/ia64/include/asm/pgtable.h | 7
-rw-r--r-- | arch/ia64/kernel/Makefile | 2
-rw-r--r-- | arch/ia64/kernel/acpi.c | 7
-rw-r--r-- | arch/ia64/kernel/efi.c | 11
-rw-r--r-- | arch/ia64/kernel/fsys.S | 4
-rw-r--r-- | arch/ia64/kernel/head.S | 6
-rw-r--r-- | arch/ia64/kernel/ia64_ksyms.c | 12
-rw-r--r-- | arch/ia64/kernel/machine_kexec.c | 2
-rw-r--r-- | arch/ia64/kernel/mca.c | 4
-rw-r--r-- | arch/ia64/kernel/module.c | 29
-rw-r--r-- | arch/ia64/kernel/pal.S | 6
-rw-r--r-- | arch/ia64/mm/Makefile | 1
-rw-r--r-- | arch/ia64/mm/contig.c | 4
-rw-r--r-- | arch/ia64/mm/discontig.c | 21
-rw-r--r-- | arch/ia64/mm/fault.c | 15
-rw-r--r-- | arch/ia64/mm/init.c | 221
21 files changed, 58 insertions(+), 360 deletions(-)
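
The module.h/module.c hunks in the diff below cache the address and size of the module's .opd section in mod_arch_specific at module_finalize() time, so that dereference_module_function_descriptor() can still range-check function pointers after the load-time ELF section headers have been discarded. A minimal standalone sketch of that descriptor-dereference pattern follows; it is not kernel code, and struct opd, struct fake_module and deref_fdesc are made-up illustrative names, not the kernel's types.

/*
 * Standalone sketch (userspace C, hypothetical names).  On ia64 a
 * "function pointer" is really a pointer to an official procedure
 * descriptor (OPD): a pair of {entry address, gp}.  Symbolizers need
 * the real entry address, so pointers that land inside the cached
 * .opd range are dereferenced; anything else is returned unchanged.
 */
#include <stdio.h>
#include <stdint.h>

struct opd {                    /* one procedure descriptor */
	uintptr_t entry;        /* real code address */
	uintptr_t gp;           /* global pointer for the callee */
};

struct fake_module {
	void *opd_addr;         /* cached start of the .opd section */
	unsigned long opd_size; /* cached size, in bytes */
};

/* Same range check the kernel hunk performs against opd_addr/opd_size. */
static void *deref_fdesc(struct fake_module *mod, void *ptr)
{
	char *p = ptr, *base = mod->opd_addr;

	if (p < base || p >= base + mod->opd_size)
		return ptr;                        /* already a code address */
	return (void *)((struct opd *)ptr)->entry; /* unwrap the descriptor */
}

int main(void)
{
	static struct opd opd_table[1] = { { 0x400abcd0, 0x600f0000 } };
	struct fake_module mod = {
		.opd_addr = opd_table,   /* would be set at "finalize" time */
		.opd_size = sizeof(opd_table),
	};
	void *fdesc = &opd_table[0];

	printf("descriptor %p -> entry %p\n", fdesc, deref_fdesc(&mod, fdesc));
	return 0;
}
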
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 2ad7a8d29fcc..81e2b893b1e7 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -286,15 +286,6 @@ config FORCE_CPEI_RETARGET config ARCH_SELECT_MEMORY_MODEL def_bool y -config ARCH_DISCONTIGMEM_ENABLE - def_bool y - depends on BROKEN - help - Say Y to support efficient handling of discontiguous physical memory, - for architectures which are either NUMA (Non-Uniform Memory Access) - or have huge holes in the physical address space for other reasons. - See <file:Documentation/vm/numa.rst> for more. - config ARCH_FLATMEM_ENABLE def_bool y @@ -325,22 +316,8 @@ config NODES_SHIFT MAX_NUMNODES will be 2^(This value). If in doubt, use the default. -# VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent. -# VIRTUAL_MEM_MAP has been retained for historical reasons. -config VIRTUAL_MEM_MAP - bool "Virtual mem map" - depends on !SPARSEMEM && !FLATMEM - default y - help - Say Y to compile the kernel with support for a virtual mem map. - This code also only takes effect if a memory hole of greater than - 1 Gb is found during boot. You must turn this option on if you - require the DISCONTIGMEM option for your machine. If you are - unsure, say Y. - config HOLES_IN_ZONE bool - default y if VIRTUAL_MEM_MAP config HAVE_ARCH_NODEDATA_EXTENSION def_bool y diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig index c409756b5396..0341a67cc1bf 100644 --- a/arch/ia64/configs/bigsur_defconfig +++ b/arch/ia64/configs/bigsur_defconfig @@ -9,7 +9,6 @@ CONFIG_SGI_PARTITION=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_PREEMPT=y -# CONFIG_VIRTUAL_MEM_MAP is not set CONFIG_IA64_PALINFO=y CONFIG_EFI_VARS=y CONFIG_BINFMT_MISC=m diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h index e789c0818edb..6c47a239fc26 100644 --- a/arch/ia64/include/asm/meminit.h +++ b/arch/ia64/include/asm/meminit.h @@ -58,15 +58,4 @@ extern int reserve_elfcorehdr(u64 *start, u64 *end); extern int register_active_ranges(u64 start, u64 len, int nid); -#ifdef CONFIG_VIRTUAL_MEM_MAP - extern unsigned long VMALLOC_END; - extern struct page *vmem_map; - extern int create_mem_map_page_table(u64 start, u64 end, void *arg); - extern int vmemmap_find_next_valid_pfn(int, int); -#else -static inline int vmemmap_find_next_valid_pfn(int node, int i) -{ - return i + 1; -} -#endif #endif /* meminit_h */ diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h index 5a29652e6def..7271b9c5fc76 100644 --- a/arch/ia64/include/asm/module.h +++ b/arch/ia64/include/asm/module.h @@ -14,16 +14,20 @@ struct elf64_shdr; /* forward declration */ struct mod_arch_specific { + /* Used only at module load time. */ struct elf64_shdr *core_plt; /* core PLT section */ struct elf64_shdr *init_plt; /* init PLT section */ struct elf64_shdr *got; /* global offset table */ struct elf64_shdr *opd; /* official procedure descriptors */ struct elf64_shdr *unwind; /* unwind-table section */ unsigned long gp; /* global-pointer for module */ + unsigned int next_got_entry; /* index of next available got entry */ + /* Used at module run and cleanup time. 
*/ void *core_unw_table; /* core unwind-table cookie returned by unwinder */ void *init_unw_table; /* init unwind-table cookie returned by unwinder */ - unsigned int next_got_entry; /* index of next available got entry */ + void *opd_addr; /* symbolize uses .opd to get to actual function */ + unsigned long opd_size; }; #define ARCH_SHF_SMALL SHF_IA_64_SHORT diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h index b69a5499d75b..f4dc81fa7146 100644 --- a/arch/ia64/include/asm/page.h +++ b/arch/ia64/include/asm/page.h @@ -95,31 +95,10 @@ do { \ #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#ifdef CONFIG_VIRTUAL_MEM_MAP -extern int ia64_pfn_valid (unsigned long pfn); -#else -# define ia64_pfn_valid(pfn) 1 -#endif - -#ifdef CONFIG_VIRTUAL_MEM_MAP -extern struct page *vmem_map; -#ifdef CONFIG_DISCONTIGMEM -# define page_to_pfn(page) ((unsigned long) (page - vmem_map)) -# define pfn_to_page(pfn) (vmem_map + (pfn)) -# define __pfn_to_phys(pfn) PFN_PHYS(pfn) -#else -# include <asm-generic/memory_model.h> -#endif -#else -# include <asm-generic/memory_model.h> -#endif +#include <asm-generic/memory_model.h> #ifdef CONFIG_FLATMEM -# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) -#elif defined(CONFIG_DISCONTIGMEM) -extern unsigned long min_low_pfn; -extern unsigned long max_low_pfn; -# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) +# define pfn_valid(pfn) ((pfn) < max_mapnr) #endif #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 9b4efe89e62d..d765fd948fae 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -223,10 +223,6 @@ ia64_phys_addr_valid (unsigned long addr) #define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL) -#ifdef CONFIG_VIRTUAL_MEM_MAP -# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) -extern unsigned long VMALLOC_END; -#else #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP) /* SPARSEMEM_VMEMMAP uses half of vmalloc... */ # define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10))) @@ -234,7 +230,6 @@ extern unsigned long VMALLOC_END; #else # define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) #endif -#endif /* fs/proc/kcore.c */ #define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE)) @@ -328,7 +323,7 @@ extern void __ia64_sync_icache_dcache(pte_t pteval); static inline void set_pte(pte_t *ptep, pte_t pteval) { /* page is present && page is user && page is executable - * && (page swapin or new page or page migraton + * && (page swapin or new page or page migration * || copy_on_write with page copying.) 
*/ if (pte_present_exec_user(pteval) && diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 78717819131c..08d4a2ba0652 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -9,7 +9,7 @@ endif extra-y := head.o vmlinux.lds -obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ +obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o irq.o irq_ia64.o \ irq_lsapic.o ivt.o pal.o patch.o process.o ptrace.o sal.o \ salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ unwind.o mca.o mca_asm.o topology.o dma-mapping.o iosapic.o acpi.o \ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index a5636524af76..e2af6b172200 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -446,7 +446,8 @@ void __init acpi_numa_fixup(void) if (srat_num_cpus == 0) { node_set_online(0); node_cpuid[0].phys_id = hard_smp_processor_id(); - return; + slit_distance(0, 0) = LOCAL_DISTANCE; + goto out; } /* @@ -489,7 +490,7 @@ void __init acpi_numa_fixup(void) for (j = 0; j < MAX_NUMNODES; j++) slit_distance(i, j) = i == j ? LOCAL_DISTANCE : REMOTE_DISTANCE; - return; + goto out; } memset(numa_slit, -1, sizeof(numa_slit)); @@ -514,6 +515,8 @@ void __init acpi_numa_fixup(void) printk("\n"); } #endif +out: + node_possible_map = node_online_map; } #endif /* CONFIG_ACPI_NUMA */ diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index c5fe21de46a8..31149e41f9be 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -415,10 +415,10 @@ efi_get_pal_addr (void) mask = ~((1 << IA64_GRANULE_SHIFT) - 1); printk(KERN_INFO "CPU %d: mapping PAL code " - "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n", - smp_processor_id(), md->phys_addr, - md->phys_addr + efi_md_size(md), - vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); + "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n", + smp_processor_id(), md->phys_addr, + md->phys_addr + efi_md_size(md), + vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); #endif return __va(md->phys_addr); } @@ -560,6 +560,7 @@ efi_init (void) { efi_memory_desc_t *md; void *p; + unsigned int i; for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) @@ -586,7 +587,7 @@ efi_init (void) } printk("mem%02d: %s " - "range=[0x%016lx-0x%016lx) (%4lu%s)\n", + "range=[0x%016llx-0x%016llx) (%4lu%s)\n", i, efi_md_typeattr_format(buf, sizeof(buf), md), md->phys_addr, md->phys_addr + efi_md_size(md), size, unit); diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 0750a716adc7..2094f3249019 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -172,7 +172,7 @@ ENTRY(fsys_gettimeofday) // r25 = itc_lastcycle value // r26 = address clocksource cycle_last // r27 = (not used) - // r28 = sequence number at the beginning of critcal section + // r28 = sequence number at the beginning of critical section // r29 = address of itc_jitter // r30 = time processing flags / memory address // r31 = pointer to result @@ -432,7 +432,7 @@ GLOBAL_ENTRY(fsys_bubble_down) * - r29: psr * * We used to clear some PSR bits here but that requires slow - * serialization. Fortuntely, that isn't really necessary. + * serialization. Fortunately, that isn't really necessary. * The rationale is as follows: we used to clear bits * ~PSR_PRESERVED_BITS in PSR.L. 
Since * PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 30f1ef760136..f22469f1c1fc 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -33,7 +33,6 @@ #include <asm/mca_asm.h> #include <linux/init.h> #include <linux/linkage.h> -#include <linux/pgtable.h> #include <asm/export.h> #ifdef CONFIG_HOTPLUG_CPU @@ -405,11 +404,6 @@ start_ap: // This is executed by the bootstrap processor (bsp) only: -#ifdef CONFIG_IA64_FW_EMU - // initialize PAL & SAL emulator: - br.call.sptk.many rp=sys_fw_init -.ret1: -#endif br.call.sptk.many rp=start_kernel .ret2: addl r3=@ltoff(halt_msg),gp ;; diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c deleted file mode 100644 index f8150ee74f29..000000000000 --- a/arch/ia64/kernel/ia64_ksyms.c +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Architecture-specific kernel symbols - */ - -#if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_DISCONTIGMEM) -#include <linux/compiler.h> -#include <linux/export.h> -#include <linux/memblock.h> -EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */ -EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ -#endif diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c index af310dc8a356..4db9ca144fa5 100644 --- a/arch/ia64/kernel/machine_kexec.c +++ b/arch/ia64/kernel/machine_kexec.c @@ -143,7 +143,7 @@ void machine_kexec(struct kimage *image) void arch_crash_save_vmcoreinfo(void) { -#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_SPARSEMEM) +#if defined(CONFIG_SPARSEMEM) VMCOREINFO_SYMBOL(pgdat_list); VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES); #endif diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index adf6521525f4..cdbac4b52f30 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -109,9 +109,9 @@ #include "irq.h" #if defined(IA64_MCA_DEBUG_INFO) -# define IA64_MCA_DEBUG(fmt...) printk(fmt) +# define IA64_MCA_DEBUG(fmt...) printk(fmt) #else -# define IA64_MCA_DEBUG(fmt...) +# define IA64_MCA_DEBUG(fmt...) do {} while (0) #endif #define NOTIFY_INIT(event, regs, arg, spin) \ diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 00a496cb346f..2cba53c1da82 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -905,9 +905,31 @@ register_unwind_table (struct module *mod) int module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { + struct mod_arch_specific *mas = &mod->arch; + DEBUGP("%s: init: entry=%p\n", __func__, mod->init); - if (mod->arch.unwind) + if (mas->unwind) register_unwind_table(mod); + + /* + * ".opd" was already relocated to the final destination. Store + * it's address for use in symbolizer. + */ + mas->opd_addr = (void *)mas->opd->sh_addr; + mas->opd_size = mas->opd->sh_size; + + /* + * Module relocation was already done at this point. Section + * headers are about to be deleted. Wipe out load-time context. 
+ */ + mas->core_plt = NULL; + mas->init_plt = NULL; + mas->got = NULL; + mas->opd = NULL; + mas->unwind = NULL; + mas->gp = 0; + mas->next_got_entry = 0; + return 0; } @@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod) void *dereference_module_function_descriptor(struct module *mod, void *ptr) { - Elf64_Shdr *opd = mod->arch.opd; + struct mod_arch_specific *mas = &mod->arch; - if (ptr < (void *)opd->sh_addr || - ptr >= (void *)(opd->sh_addr + opd->sh_size)) + if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size) return ptr; return dereference_function_descriptor(ptr); diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S index d3e22c018b68..06d01a070aae 100644 --- a/arch/ia64/kernel/pal.S +++ b/arch/ia64/kernel/pal.S @@ -86,7 +86,7 @@ GLOBAL_ENTRY(ia64_pal_call_static) mov ar.pfs = loc1 mov rp = loc0 ;; - srlz.d // seralize restoration of psr.l + srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_static) EXPORT_SYMBOL(ia64_pal_call_static) @@ -194,7 +194,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static) mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration - srlz.d // seralize restoration of psr.l + srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_static) EXPORT_SYMBOL(ia64_pal_call_phys_static) @@ -252,7 +252,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked) mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration - srlz.d // seralize restoration of psr.l + srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_stacked) EXPORT_SYMBOL(ia64_pal_call_phys_stacked) diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile index 99a35039b548..c03f63c62ac4 100644 --- a/arch/ia64/mm/Makefile +++ b/arch/ia64/mm/Makefile @@ -7,6 +7,5 @@ obj-y := init.o fault.o tlb.o extable.o ioremap.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NUMA) += numa.o -obj-$(CONFIG_DISCONTIGMEM) += discontig.o obj-$(CONFIG_SPARSEMEM) += discontig.o obj-$(CONFIG_FLATMEM) += contig.o diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 62fe80a16f42..42e025cfbd08 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -153,11 +153,7 @@ find_memory (void) efi_memmap_walk(find_max_min_low_pfn, NULL); max_pfn = max_low_pfn; -#ifdef CONFIG_VIRTUAL_MEM_MAP - efi_memmap_walk(filter_memory, register_active_ranges); -#else memblock_add_node(0, PFN_PHYS(max_low_pfn), 0); -#endif find_initrd(); diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index c310b4c99fb3..791d4176e4a6 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -585,25 +585,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg) } } -static void __init virtual_map_init(void) -{ -#ifdef CONFIG_VIRTUAL_MEM_MAP - int node; - - VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * - sizeof(struct page)); - vmem_map = (struct page *) VMALLOC_END; - efi_memmap_walk(create_mem_map_page_table, NULL); - printk("Virtual mem_map starts at 0x%p\n", vmem_map); - - for_each_online_node(node) { - unsigned long pfn_offset = mem_data[node].min_pfn; - - NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; - } -#endif -} - /** * paging_init - setup page tables * @@ -619,8 +600,6 @@ void __init paging_init(void) sparse_init(); - virtual_map_init(); - memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[ZONE_DMA32] = max_dma; max_zone_pfns[ZONE_NORMAL] = max_low_pfn; diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 
cd9766d2b6e0..02de2e70c587 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -84,18 +84,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re if (faulthandler_disabled() || !mm) goto no_context; -#ifdef CONFIG_VIRTUAL_MEM_MAP - /* - * If fault is in region 5 and we are in the kernel, we may already - * have the mmap_lock (pfn_valid macro is called during mmap). There - * is no vma for region 5 addr's anyway, so skip getting the semaphore - * and go directly to the exception handling code. - */ - - if ((REGION_NUMBER(address) == 5) && !user_mode(regs)) - goto bad_area_no_up; -#endif - /* * This is to handle the kprobes on user space access instructions */ @@ -213,9 +201,6 @@ retry: bad_area: mmap_read_unlock(mm); -#ifdef CONFIG_VIRTUAL_MEM_MAP - bad_area_no_up: -#endif if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 16d0d7d22657..064a967a7b6e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -43,13 +43,6 @@ extern void ia64_tlb_init (void); unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; -#ifdef CONFIG_VIRTUAL_MEM_MAP -unsigned long VMALLOC_END = VMALLOC_END_INIT; -EXPORT_SYMBOL(VMALLOC_END); -struct page *vmem_map; -EXPORT_SYMBOL(vmem_map); -#endif - struct page *zero_page_memmap_ptr; /* map entry for zero page */ EXPORT_SYMBOL(zero_page_memmap_ptr); @@ -373,212 +366,6 @@ void ia64_mmu_init(void *my_cpu_data) #endif } -#ifdef CONFIG_VIRTUAL_MEM_MAP -int vmemmap_find_next_valid_pfn(int node, int i) -{ - unsigned long end_address, hole_next_pfn; - unsigned long stop_address; - pg_data_t *pgdat = NODE_DATA(node); - - end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; - end_address = PAGE_ALIGN(end_address); - stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)]; - - do { - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - pgd = pgd_offset_k(end_address); - if (pgd_none(*pgd)) { - end_address += PGDIR_SIZE; - continue; - } - - p4d = p4d_offset(pgd, end_address); - if (p4d_none(*p4d)) { - end_address += P4D_SIZE; - continue; - } - - pud = pud_offset(p4d, end_address); - if (pud_none(*pud)) { - end_address += PUD_SIZE; - continue; - } - - pmd = pmd_offset(pud, end_address); - if (pmd_none(*pmd)) { - end_address += PMD_SIZE; - continue; - } - - pte = pte_offset_kernel(pmd, end_address); -retry_pte: - if (pte_none(*pte)) { - end_address += PAGE_SIZE; - pte++; - if ((end_address < stop_address) && - (end_address != ALIGN(end_address, 1UL << PMD_SHIFT))) - goto retry_pte; - continue; - } - /* Found next valid vmem_map page */ - break; - } while (end_address < stop_address); - - end_address = min(end_address, stop_address); - end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1; - hole_next_pfn = end_address / sizeof(struct page); - return hole_next_pfn - pgdat->node_start_pfn; -} - -int __init create_mem_map_page_table(u64 start, u64 end, void *arg) -{ - unsigned long address, start_page, end_page; - struct page *map_start, *map_end; - int node; - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); - map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); - - start_page = (unsigned long) map_start & PAGE_MASK; - end_page = PAGE_ALIGN((unsigned long) map_end); - node = paddr_to_nid(__pa(start)); - - for (address = start_page; address < end_page; address += PAGE_SIZE) { - pgd = 
pgd_offset_k(address); - if (pgd_none(*pgd)) { - p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!p4d) - goto err_alloc; - pgd_populate(&init_mm, pgd, p4d); - } - p4d = p4d_offset(pgd, address); - - if (p4d_none(*p4d)) { - pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!pud) - goto err_alloc; - p4d_populate(&init_mm, p4d, pud); - } - pud = pud_offset(p4d, address); - - if (pud_none(*pud)) { - pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!pmd) - goto err_alloc; - pud_populate(&init_mm, pud, pmd); - } - pmd = pmd_offset(pud, address); - - if (pmd_none(*pmd)) { - pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!pte) - goto err_alloc; - pmd_populate_kernel(&init_mm, pmd, pte); - } - pte = pte_offset_kernel(pmd, address); - - if (pte_none(*pte)) { - void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, - node); - if (!page) - goto err_alloc; - set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT, - PAGE_KERNEL)); - } - } - return 0; - -err_alloc: - panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n", - __func__, PAGE_SIZE, PAGE_SIZE, node); - return -ENOMEM; -} - -struct memmap_init_callback_data { - struct page *start; - struct page *end; - int nid; - unsigned long zone; -}; - -static int __meminit -virtual_memmap_init(u64 start, u64 end, void *arg) -{ - struct memmap_init_callback_data *args; - struct page *map_start, *map_end; - - args = (struct memmap_init_callback_data *) arg; - map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); - map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); - - if (map_start < args->start) - map_start = args->start; - if (map_end > args->end) - map_end = args->end; - - /* - * We have to initialize "out of bounds" struct page elements that fit completely - * on the same pages that were allocated for the "in bounds" elements because they - * may be referenced later (and found to be "reserved"). - */ - map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page); - map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end) - / sizeof(struct page)); - - if (map_start < map_end) - memmap_init_range((unsigned long)(map_end - map_start), - args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end), - MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); - return 0; -} - -void __meminit memmap_init_zone(struct zone *zone) -{ - int nid = zone_to_nid(zone), zone_id = zone_idx(zone); - unsigned long start_pfn = zone->zone_start_pfn; - unsigned long size = zone->spanned_pages; - - if (!vmem_map) { - memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size, - MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); - } else { - struct page *start; - struct memmap_init_callback_data args; - - start = pfn_to_page(start_pfn); - args.start = start; - args.end = start + size; - args.nid = nid; - args.zone = zone_id; - - efi_memmap_walk(virtual_memmap_init, &args); - } -} - -int -ia64_pfn_valid (unsigned long pfn) -{ - char byte; - struct page *pg = pfn_to_page(pfn); - - return (__get_user(byte, (char __user *) pg) == 0) - && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK)) - || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0)); -} -EXPORT_SYMBOL(ia64_pfn_valid); - -#endif /* CONFIG_VIRTUAL_MEM_MAP */ - int __init register_active_ranges(u64 start, u64 len, int nid) { u64 end = start + len; @@ -644,13 +431,16 @@ mem_init (void) * _before_ any drivers that may need the PCI DMA interface are * initialized or bootmem has been freed. 
*/ + do { #ifdef CONFIG_INTEL_IOMMU - detect_intel_iommu(); - if (!iommu_detected) + detect_intel_iommu(); + if (iommu_detected) + break; #endif #ifdef CONFIG_SWIOTLB swiotlb_init(1); #endif + } while (0); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); @@ -659,7 +449,6 @@ mem_init (void) set_max_mapnr(max_low_pfn); high_memory = __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); - mem_init_print_info(NULL); /* * For fsyscall entrpoints with no light-weight handler, use the ordinary |