author    Benjamin Herrenschmidt <benh@kernel.crashing.org>    2005-05-01 08:58:44 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-05-01 08:58:44 -0700
commit    58366af5861eee1479426380e3c91ecb334c301d (patch)
tree      2c7e61d424279057ebeb2ef32b2e9648666848ca /arch
parent    0339ad77c4a06fa8529db17c91f790058e18b65b (diff)
[PATCH] ppc64: update to use the new 4L headers
This patch converts ppc64 to use the generic pgtable-nopud.h instead of the
"fixup" header.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
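For context: the generic 4-level fixup folds the pud level into the pgd, so a
three-level architecture such as ppc64 picks up the extra level as a no-op.
Below is a condensed sketch of that folding idiom, paraphrased from the
asm-generic/pgtable-nopud.h of this era (not the verbatim header; the pgd_t
stand-in is illustrative):

    /* Illustrative stand-in for the arch-specific pgd entry type. */
    typedef struct { unsigned long pgd; } pgd_t;

    /* A pud is just a view of the pgd entry it is folded into. */
    typedef struct { pgd_t pgd; } pud_t;

    /* The folded level is never absent or bad at the pgd. */
    static inline int pgd_none(pgd_t pgd)    { return 0; }
    static inline int pgd_present(pgd_t pgd) { return 1; }
    static inline void pgd_clear(pgd_t *pgd) { }

    /* Stepping from pgd to pud reinterprets the same slot in place. */
    static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
    {
        return (pud_t *)pgd;
    }

This is why the hunks below can swap pgd_none()/pgd_page()/pgd_populate() for
their pud_* counterparts one-for-one and cast huge_pgdir entries to pud_t *:
with the pud folded, both types describe the same page-table slot.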
Diffstat (limited to 'arch')
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c |  45
-rw-r--r--  arch/ppc64/mm/init.c        | 198
2 files changed, 123 insertions(+), 120 deletions(-)
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 390296efe3e0..d3bf86a5c1ad 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -42,7 +42,7 @@ static inline int hugepgd_index(unsigned long addr)
return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT;
}
-static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
+static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
{
int index;
@@ -52,21 +52,21 @@ static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
index = hugepgd_index(addr);
BUG_ON(index >= PTRS_PER_HUGEPGD);
- return mm->context.huge_pgdir + index;
+ return (pud_t *)(mm->context.huge_pgdir + index);
}
-static inline pte_t *hugepte_offset(pgd_t *dir, unsigned long addr)
+static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr)
{
int index;
- if (pgd_none(*dir))
+ if (pud_none(*dir))
return NULL;
index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE;
- return (pte_t *)pgd_page(*dir) + index;
+ return (pte_t *)pud_page(*dir) + index;
}
-static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
+static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
{
BUG_ON(! in_hugepage_area(mm->context, addr));
@@ -90,10 +90,9 @@ static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
return hugepgd_offset(mm, addr);
}
-static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
- unsigned long addr)
+static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr)
{
- if (! pgd_present(*dir)) {
+ if (! pud_present(*dir)) {
pte_t *new;
spin_unlock(&mm->page_table_lock);
@@ -104,7 +103,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
* Because we dropped the lock, we should re-check the
* entry, as somebody else could have populated it..
*/
- if (pgd_present(*dir)) {
+ if (pud_present(*dir)) {
if (new)
kmem_cache_free(zero_cache, new);
} else {
@@ -115,7 +114,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
ptepage = virt_to_page(new);
ptepage->mapping = (void *) mm;
ptepage->index = addr & HUGEPGDIR_MASK;
- pgd_populate(mm, dir, new);
+ pud_populate(mm, dir, new);
}
}
@@ -124,28 +123,28 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
- pgd_t *pgd;
+ pud_t *pud;
BUG_ON(! in_hugepage_area(mm->context, addr));
- pgd = hugepgd_offset(mm, addr);
- if (! pgd)
+ pud = hugepgd_offset(mm, addr);
+ if (! pud)
return NULL;
- return hugepte_offset(pgd, addr);
+ return hugepte_offset(pud, addr);
}
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
- pgd_t *pgd;
+ pud_t *pud;
BUG_ON(! in_hugepage_area(mm->context, addr));
- pgd = hugepgd_alloc(mm, addr);
- if (! pgd)
+ pud = hugepgd_alloc(mm, addr);
+ if (! pud)
return NULL;
- return hugepte_alloc(mm, pgd, addr);
+ return hugepte_alloc(mm, pud, addr);
}
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -709,10 +708,10 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm)
/* cleanup any hugepte pages leftover */
for (i = 0; i < PTRS_PER_HUGEPGD; i++) {
- pgd_t *pgd = pgdir + i;
+ pud_t *pud = (pud_t *)(pgdir + i);
- if (! pgd_none(*pgd)) {
- pte_t *pte = (pte_t *)pgd_page(*pgd);
+ if (! pud_none(*pud)) {
+ pte_t *pte = (pte_t *)pud_page(*pud);
struct page *ptepage = virt_to_page(pte);
ptepage->mapping = NULL;
@@ -720,7 +719,7 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm)
BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE));
kmem_cache_free(zero_cache, pte);
}
- pgd_clear(pgd);
+ pud_clear(pud);
}
BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE));
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index a7149b9fc35c..cf33d7ec2e29 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -136,14 +136,78 @@ void iounmap(volatile void __iomem *addr)
#else
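+/*
+ * Tear down the ioremap page tables over [addr, end) one level at a
+ * time: the pte level clears entries, and the pmd/pud/pgd levels skip
+ * holes via the *_none_or_clear_bad() helpers.
+ */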
+static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
+ unsigned long end)
+{
+ pte_t *pte;
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+ pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
+ unsigned long end)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ unmap_im_area_pte(pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
+}
+
+static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
+ unsigned long end)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ unmap_im_area_pmd(pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
+
+static void unmap_im_area(unsigned long addr, unsigned long end)
+{
+ struct mm_struct *mm = &ioremap_mm;
+ unsigned long start = addr, next; /* keep the original start for the TLB flush */
+ pgd_t *pgd;
+
+ spin_lock(&mm->page_table_lock);
+
+ pgd = pgd_offset_i(addr);
+ flush_cache_vunmap(addr, end);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ unmap_im_area_pud(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+ flush_tlb_kernel_range(start, end);
+
+ spin_unlock(&mm->page_table_lock);
+}
+
/*
* map_io_page currently only called by __ioremap
* map_io_page adds an entry to the ioremap page table
* and adds an entry to the HPT, possibly bolting it
*/
-static void map_io_page(unsigned long ea, unsigned long pa, int flags)
+static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
pgd_t *pgdp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
unsigned long vsid;
@@ -151,9 +215,15 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
if (mem_init_done) {
spin_lock(&ioremap_mm.page_table_lock);
pgdp = pgd_offset_i(ea);
- pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
+ pudp = pud_alloc(&ioremap_mm, pgdp, ea);
+ if (!pudp) {
+ spin_unlock(&ioremap_mm.page_table_lock);
+ return -ENOMEM;
+ }
+ pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
+ if (!pmdp) {
+ spin_unlock(&ioremap_mm.page_table_lock);
+ return -ENOMEM;
+ }
ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
-
+ if (!ptep) {
+ spin_unlock(&ioremap_mm.page_table_lock);
+ return -ENOMEM;
+ }
pa = abs_to_phys(pa);
set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
@@ -181,6 +251,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
panic("map_io_page: could not insert mapping");
}
}
+ return 0;
}
@@ -194,9 +265,14 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
flags |= pgprot_val(PAGE_KERNEL);
for (i = 0; i < size; i += PAGE_SIZE)
- map_io_page(ea+i, pa+i, flags);
+ if (map_io_page(ea+i, pa+i, flags))
+ goto failure;
return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+ failure:
+ if (mem_init_done)
+ unmap_im_area(ea, ea + size);
+ return NULL;
}
@@ -206,10 +282,11 @@ ioremap(unsigned long addr, unsigned long size)
return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}
-void __iomem *
-__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
+void __iomem * __ioremap(unsigned long addr, unsigned long size,
+ unsigned long flags)
{
unsigned long pa, ea;
+ void __iomem *ret;
/*
* Choose an address to map it to.
@@ -232,12 +309,16 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags)
if (area == NULL)
return NULL;
ea = (unsigned long)(area->addr);
+ ret = __ioremap_com(addr, pa, ea, size, flags);
+ if (!ret)
+ im_free(area->addr);
} else {
ea = ioremap_bot;
- ioremap_bot += size;
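+ /* Advance ioremap_bot only once the mapping is known good. */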
+ ret = __ioremap_com(addr, pa, ea, size, flags);
+ if (ret)
+ ioremap_bot += size;
}
-
- return __ioremap_com(addr, pa, ea, size, flags);
+ return ret;
}
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
@@ -246,6 +327,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
unsigned long size, unsigned long flags)
{
struct vm_struct *area;
+ void __iomem *ret;
/* For now, require page-aligned values for pa, ea, and size */
if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
@@ -276,7 +358,12 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
}
}
- if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) {
+ ret = __ioremap_com(pa, pa, ea, size, flags);
+ if (ret == NULL) {
+ printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
+ return 1;
+ }
+ if (ret != (void *) ea) {
printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
return 1;
}
@@ -284,69 +371,6 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
return 0;
}
-static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
- unsigned long size)
-{
- unsigned long base, end;
- pte_t *pte;
-
- if (pmd_none(*pmd))
- return;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return;
- }
-
- pte = pte_offset_kernel(pmd, address);
- base = address & PMD_MASK;
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
-
- do {
- pte_t page;
- page = ptep_get_and_clear(&ioremap_mm, base + address, pte);
- address += PAGE_SIZE;
- pte++;
- if (pte_none(page))
- continue;
- if (pte_present(page))
- continue;
- printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
- " table\n");
- } while (address < end);
-}
-
-static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
- unsigned long size)
-{
- unsigned long base, end;
- pmd_t *pmd;
-
- if (pgd_none(*dir))
- return;
- if (pgd_bad(*dir)) {
- pgd_ERROR(*dir);
- pgd_clear(dir);
- return;
- }
-
- pmd = pmd_offset(dir, address);
- base = address & PGDIR_MASK;
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
-
- do {
- unmap_im_area_pte(pmd, base + address, end - address);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
-}
-
/*
* Unmap an IO region and remove it from imalloc'd list.
* Access to IO memory should be serialized by driver.
@@ -356,39 +380,19 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
*/
void iounmap(volatile void __iomem *token)
{
- unsigned long address, start, end, size;
- struct mm_struct *mm;
- pgd_t *dir;
+ unsigned long address, size;
void *addr;
- if (!mem_init_done) {
+ if (!mem_init_done)
return;
- }
addr = (void *) ((unsigned long __force) token & PAGE_MASK);
- if ((size = im_free(addr)) == 0) {
+ if ((size = im_free(addr)) == 0)
return;
- }
address = (unsigned long)addr;
- start = address;
- end = address + size;
-
- mm = &ioremap_mm;
- spin_lock(&mm->page_table_lock);
-
- dir = pgd_offset_i(address);
- flush_cache_vunmap(address, end);
- do {
- unmap_im_area_pmd(dir, address, end - address);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- flush_tlb_kernel_range(start, end);
-
- spin_unlock(&mm->page_table_lock);
- return;
+ unmap_im_area(address, address + size);
}
static int iounmap_subset_regions(unsigned long addr, unsigned long size)