Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c          |  2
-rw-r--r--  arch/x86/mm/init_32.c        | 10
-rw-r--r--  arch/x86/mm/init_64.c        | 13
-rw-r--r--  arch/x86/mm/ioremap.c        |  3
-rw-r--r--  arch/x86/mm/numa_64.c        | 34
-rw-r--r--  arch/x86/mm/pageattr-test.c  |  4
6 files changed, 33 insertions, 33 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e28cc5277b16..e4440d0abf81 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -382,7 +382,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 
 #ifdef CONFIG_X86_PAE
 	if (error_code & PF_INSTR) {
-		int level;
+		unsigned int level;
 		pte_t *pte = lookup_address(address, &level);
 
 		if (pte && pte_present(*pte) && !pte_exec(*pte))
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index da524fb22422..f2f36f8dae52 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -423,23 +423,23 @@ static void __init pagetable_init(void)
 	paravirt_pagetable_setup_done(pgd_base);
 }
 
-#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
+#ifdef CONFIG_ACPI_SLEEP
 /*
- * Swap suspend & friends need this for resume because things like the intel-agp
+ * ACPI suspend needs this for resume, because things like the intel-agp
  * driver might have split up a kernel 4MB mapping.
  */
-char __nosavedata swsusp_pg_dir[PAGE_SIZE]
+char swsusp_pg_dir[PAGE_SIZE]
 	__attribute__ ((aligned(PAGE_SIZE)));
 
 static inline void save_pg_dir(void)
 {
 	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
 }
-#else
+#else /* !CONFIG_ACPI_SLEEP */
 static inline void save_pg_dir(void)
 {
 }
-#endif
+#endif /* !CONFIG_ACPI_SLEEP */
 
 void zap_low_mappings(void)
 {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index cc50a13ce8d9..eabcaed76c28 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -354,17 +354,10 @@ static void __init find_early_table_space(unsigned long end)
 	 * need roughly 0.5KB per GB.
 	 */
 	start = 0x8000;
-	table_start = find_e820_area(start, end, tables);
+	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
 	if (table_start == -1UL)
 		panic("Cannot find space for the kernel page tables");
 
-	/*
-	 * When you have a lot of RAM like 256GB, early_table will not fit
-	 * into 0x8000 range, find_e820_area() will find area after kernel
-	 * bss but the table_start is not page aligned, so need to round it
-	 * up to avoid overlap with bss:
-	 */
-	table_start = round_up(table_start, PAGE_SIZE);
 	table_start >>= PAGE_SHIFT;
 	table_end = table_start;
 
@@ -420,7 +413,9 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 	mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 
-	reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+	if (!after_bootmem)
+		reserve_early(table_start << PAGE_SHIFT,
+			      table_end << PAGE_SHIFT, "PGTABLE");
 }
 
 #ifndef CONFIG_NUMA
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index a177d76e1c53..c004d94608fd 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -75,7 +75,8 @@ static int ioremap_change_attr(unsigned long paddr, unsigned long size,
 {
 	unsigned long vaddr = (unsigned long)__va(paddr);
 	unsigned long nrpages = size >> PAGE_SHIFT;
-	int err, level;
+	unsigned int level;
+	int err;
 
 	/* No change for pages after the last mapping */
 	if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index dc3b1f7e1451..a920d09b9194 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -84,26 +84,24 @@ static int __init populate_memnodemap(const struct bootnode *nodes,
 
 static int __init allocate_cachealigned_memnodemap(void)
 {
-	unsigned long pad, pad_addr;
+	unsigned long addr;
 
 	memnodemap = memnode.embedded_map;
 	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
 		return 0;
 
-	pad = L1_CACHE_BYTES - 1;
-	pad_addr = 0x8000;
-	nodemap_size = pad + sizeof(s16) * memnodemapsize;
-	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
-				      nodemap_size);
+	addr = 0x8000;
+	nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
+	nodemap_addr = find_e820_area(addr, end_pfn<<PAGE_SHIFT,
+				      nodemap_size, L1_CACHE_BYTES);
 	if (nodemap_addr == -1UL) {
 		printk(KERN_ERR
 		       "NUMA: Unable to allocate Memory to Node hash map\n");
 		nodemap_addr = nodemap_size = 0;
 		return -1;
 	}
-	pad_addr = (nodemap_addr + pad) & ~pad;
-	memnodemap = phys_to_virt(pad_addr);
-	reserve_early(nodemap_addr, nodemap_addr + nodemap_size);
+	memnodemap = phys_to_virt(nodemap_addr);
+	reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
 
 	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
 	       nodemap_addr, nodemap_addr + nodemap_size);
@@ -164,15 +162,16 @@ int early_pfn_to_nid(unsigned long pfn)
 }
 
 static void * __init early_node_mem(int nodeid, unsigned long start,
-				    unsigned long end, unsigned long size)
+				    unsigned long end, unsigned long size,
+				    unsigned long align)
 {
-	unsigned long mem = find_e820_area(start, end, size);
+	unsigned long mem = find_e820_area(start, end, size, align);
 	void *ptr;
 
 	if (mem != -1L)
 		return __va(mem);
-	ptr = __alloc_bootmem_nopanic(size,
-				      SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
+
+	ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
 	if (ptr == NULL) {
 		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
 		       size, nodeid);
@@ -198,7 +197,8 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
 	start_pfn = start >> PAGE_SHIFT;
 	end_pfn = end >> PAGE_SHIFT;
 
-	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
+	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
+					   SMP_CACHE_BYTES);
 	if (node_data[nodeid] == NULL)
 		return;
 	nodedata_phys = __pa(node_data[nodeid]);
@@ -211,8 +211,12 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
 	/* Find a place for the bootmem map */
 	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
 	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
+	/*
+	 * SMP_CAHCE_BYTES could be enough, but init_bootmem_node like
+	 * to use that to align to PAGE_SIZE
+	 */
 	bootmap = early_node_mem(nodeid, bootmap_start, end,
-				 bootmap_pages<<PAGE_SHIFT);
+				 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
 	if (bootmap == NULL) {
 		if (nodedata_phys < start || nodedata_phys >= end)
 			free_bootmem((unsigned long)node_data[nodeid],
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 06353d43f72e..7573e786d2f2 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -42,7 +42,7 @@ static __init int print_split(struct split_state *s)
 	s->max_exec = 0;
 	for (i = 0; i < max_pfn_mapped; ) {
 		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
-		int level;
+		unsigned int level;
 		pte_t *pte;
 
 		pte = lookup_address(addr, &level);
@@ -106,7 +106,7 @@ static __init int exercise_pageattr(void)
 	unsigned long *bm;
 	pte_t *pte, pte0;
 	int failed = 0;
-	int level;
+	unsigned int level;
 	int i, k;
 	int err;
 
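Note on the recurring pattern above: find_e820_area() now takes an explicit alignment argument and reserve_early() takes a label string, so callers such as allocate_cachealigned_memnodemap() no longer over-allocate by L1_CACHE_BYTES - 1 and round the returned address by hand; they round the requested size up once and let the allocator return an aligned block. The stand-alone C sketch below only illustrates that arithmetic (round_up() on a power-of-two alignment versus the old pad-and-mask fixup); it is not kernel code, and the sizes are made-up example values.

/* Stand-alone illustration of the alignment arithmetic used in the diff.
 * Not kernel code; the example values are assumptions for demonstration. */
#include <stdio.h>
#include <stdint.h>

#define L1_CACHE_BYTES 64UL	/* assumed cache-line size for the example */

/* round_up(): round v up to the next multiple of a power-of-two alignment,
 * matching the semantics of the helper used in the diff. */
static uint64_t round_up(uint64_t v, uint64_t align)
{
	return (v + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t entries = 1000;		/* made-up memnodemapsize */
	uint64_t size = sizeof(int16_t) * entries;

	/* Old scheme: request pad + size bytes, then fix up the returned
	 * address by hand: addr = (addr + pad) & ~pad. */
	uint64_t pad = L1_CACHE_BYTES - 1;
	uint64_t old_request = pad + size;

	/* New scheme: round the size once and pass L1_CACHE_BYTES as the
	 * align argument, so the allocator hands back an aligned block. */
	uint64_t new_request = round_up(size, L1_CACHE_BYTES);

	printf("old request: %llu bytes + manual pointer fixup\n",
	       (unsigned long long)old_request);
	printf("new request: %llu bytes, aligned by the allocator\n",
	       (unsigned long long)new_request);
	return 0;
}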