Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--  arch/x86_64/mm/fault.c   | 11
-rw-r--r--  arch/x86_64/mm/ioremap.c | 29
2 files changed, 17 insertions, 23 deletions
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index e0330921676..5d6b2114cc9 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -234,6 +234,8 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 
 /*
  * Handle a fault on the vmalloc or module mapping area
+ *
+ * This assumes no large pages in there.
  */
 static int vmalloc_fault(unsigned long address)
 {
@@ -272,7 +274,10 @@ static int vmalloc_fault(unsigned long address)
         if (!pte_present(*pte_ref))
                 return -1;
         pte = pte_offset_kernel(pmd, address);
-        if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref))
+        /* Don't use pte_page here, because the mappings can point
+           outside mem_map, and the NUMA hash lookup cannot handle
+           that. */
+        if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
                 BUG();
         __flush_tlb_all();
         return 0;
@@ -346,7 +351,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
          * protection error (error_code & 1) == 0.
          */
         if (unlikely(address >= TASK_SIZE)) {
-                if (!(error_code & 5)) {
+                if (!(error_code & 5) &&
+                    ((address >= VMALLOC_START && address < VMALLOC_END) ||
+                     (address >= MODULES_VADDR && address < MODULES_END))) {
                         if (vmalloc_fault(address) < 0)
                                 goto bad_area_nosemaphore;
                         return;
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index 74ec8554b19..58aac23760e 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -133,7 +133,7 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
                                         unsigned long flags)
 {
         int err = 0;
-        if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+        if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
                 unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                 unsigned long vaddr = (unsigned long) __va(phys_addr);
 
@@ -214,7 +214,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
                 remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
                 return NULL;
         }
-        if (ioremap_change_attr(phys_addr, size, flags) < 0) {
+        if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
                 area->flags &= 0xffffff;
                 vunmap(addr);
                 return NULL;
@@ -251,7 +251,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 
 void iounmap(volatile void __iomem *addr)
 {
-        struct vm_struct *p, **pprev;
+        struct vm_struct *p;
 
         if (addr <= high_memory)
                 return;
@@ -260,24 +260,11 @@ void iounmap(volatile void __iomem *addr)
                 return;
 
         write_lock(&vmlist_lock);
-        for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev)
-                if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
-                        break;
-        if (!p) {
-                printk("__iounmap: bad address %p\n", addr);
-                goto out_unlock;
-        }
-        *pprev = p->next;
-        unmap_vm_area(p);
-        if ((p->flags >> 20) &&
-                p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
-                /* p->size includes the guard page, but cpa doesn't like that */
-                change_page_attr(virt_to_page(__va(p->phys_addr)),
-                                 p->size >> PAGE_SHIFT,
-                                 PAGE_KERNEL);
-                global_flush_tlb();
-        }
-out_unlock:
+        p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
+        if (!p)
+                printk("iounmap: bad address %p\n", addr);
+        else if (p->flags >> 20)
+                ioremap_change_attr(p->phys_addr, p->size, 0);
         write_unlock(&vmlist_lock);
         kfree(p);
 }
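
For readers decoding the do_page_fault() hunk: the x86-64 hardware error code packs the fault kind into its low bits, and error_code & 5 tests the present bit (bit 0) and user bit (bit 2) together, so !(error_code & 5) selects kernel-mode not-present faults, the only kind that can be a lazily-synced vmalloc/module page table rather than a genuine bug. A minimal user-space sketch of the patched gate; the PF_* names and the layout constants are illustrative stand-ins, not values taken from this patch:

#include <stdio.h>
#include <stdint.h>

/* x86-64 hardware page-fault error-code bits. */
#define PF_PROT  (1u << 0)   /* 0: page not present, 1: protection violation */
#define PF_WRITE (1u << 1)   /* set on write accesses */
#define PF_USER  (1u << 2)   /* set for faults taken in user mode */

/* Stand-in layout constants; the real values live in the kernel
 * headers and differ by kernel version (assumption for illustration). */
#define VMALLOC_START 0xffffc20000000000ULL
#define VMALLOC_END   0xffffe1ffffffffffULL
#define MODULES_VADDR 0xffffffff88000000ULL
#define MODULES_END   0xfffffffffff00000ULL

/* Mirrors the patched test: a kernel-mode, not-present fault on a
 * vmalloc/module address may be handed to vmalloc_fault(); anything
 * else falls through to the normal oops/bad-area path. */
static int may_be_vmalloc_fault(uint64_t address, unsigned int error_code)
{
        if (error_code & (PF_PROT | PF_USER))
                return 0;
        return (address >= VMALLOC_START && address < VMALLOC_END) ||
               (address >= MODULES_VADDR && address < MODULES_END);
}

int main(void)
{
        printf("%d\n", may_be_vmalloc_fault(VMALLOC_START + 0x1000, 0));        /* 1 */
        printf("%d\n", may_be_vmalloc_fault(VMALLOC_START + 0x1000, PF_USER));  /* 0 */
        printf("%d\n", may_be_vmalloc_fault(0x400000, 0));                      /* 0 */
        return 0;
}

The added range check matters because before it, any kernel not-present fault above TASK_SIZE reached vmalloc_fault(), even for addresses that could never have a valid vmalloc or module mapping.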
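
The pte_pfn() change in vmalloc_fault() follows the same theme as the new comment says: pte_page() translates a pfn to a struct page, which on DISCONTIGMEM goes through the NUMA hash and misbehaves for frames outside mem_map (ioremapped MMIO, for instance), while raw pfns compare safely for any physical address. A toy model of the comparison, with pte_t deliberately simplified to a bare value (an assumption; the real type and accessors are arch-specific):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Toy pte_t: a physical address plus a present bit, standing in for
 * the kernel's opaque type (assumption for illustration). */
typedef struct { uint64_t val; } pte_t;
#define PTE_PRESENT 1ULL

static int pte_present(pte_t pte)  { return pte.val & PTE_PRESENT; }
static uint64_t pte_pfn(pte_t pte) { return pte.val >> PAGE_SHIFT; }

/* The patched comparison: look only at page frame numbers, never at
 * struct page pointers, so the check also works for mappings whose
 * frames have no mem_map entry. */
static int same_frame(pte_t a, pte_t b)
{
        return pte_present(a) && pte_present(b) && pte_pfn(a) == pte_pfn(b);
}

int main(void)
{
        pte_t a = { 0xfed00000ULL | PTE_PRESENT };  /* MMIO frame, not RAM */
        pte_t b = { 0xfed00000ULL | PTE_PRESENT };
        printf("%d\n", same_frame(a, b));           /* 1 */
        return 0;
}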