author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-09 21:31:56 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-10-09 21:31:56 +0100
commit		6a4690c22f5da1eb1c898b61b6a80da52fbd976f
tree		a03891a32abe0da191fb765fe669a597e07423c6 /arch/arm/mm/mmu.c
parent		90bb28b0644f7324f8bd1feb27b35146e6785ba2
parent		8ec53663d2698076468b3e1edc4e1b418bd54de3
Merge branch 'ptebits' into devel

Conflicts:
	arch/arm/Kconfig
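
The 'ptebits' series being merged here replaces the separate
L_PTE_BUFFERABLE/L_PTE_CACHEABLE bits in the Linux PTE with a single
L_PTE_MT_* memory-type field. A minimal sketch of what that buys,
assuming the L_PTE_MT_MASK and L_PTE_MT_* definitions from
<asm/pgtable.h>; pte_is_writeback() is a hypothetical helper, not part
of the series:

	#include <asm/pgtable.h>	/* L_PTE_MT_* definitions */

	/* Hypothetical helper, illustration only: memory type is now one
	 * field, so it is tested by mask compare, not individual bits. */
	static inline int pte_is_writeback(pte_t pte)
	{
		return (pte_val(pte) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK;
	}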
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	74
1 file changed, 42 insertions, 32 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e7af83e569d..8ba75406455 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -66,27 +66,27 @@ static struct cachepolicy cache_policies[] __initdata = {
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
- .pte = 0,
+ .pte = L_PTE_MT_UNCACHED,
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
- .pte = PTE_BUFFERABLE,
+ .pte = L_PTE_MT_BUFFERABLE,
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
- .pte = PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITETHROUGH,
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
- .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITEBACK,
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
- .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITEALLOC,
}
};
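
Each cache_policies[] entry now pairs its section-level PMD bits with a
single page-level L_PTE_MT_* type instead of an ad-hoc combination of
cacheable/bufferable flags. The entry used at boot is selected by name
from the "cachepolicy=" kernel parameter; a minimal stand-alone sketch
of that kind of lookup (not the kernel's actual parser):

	#include <string.h>

	struct policy { const char *name; unsigned int pmd, pte; };

	/* Hypothetical: return the table index whose name prefixes arg,
	 * the same shape of result the kernel stores in 'cachepolicy'. */
	static int lookup_policy(const struct policy *tbl, int n, const char *arg)
	{
		int i;
		for (i = 0; i < n; i++)
			if (strncmp(arg, tbl[i].name, strlen(tbl[i].name)) == 0)
				return i;
		return -1;
	}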
@@ -184,29 +184,28 @@ void adjust_cr(unsigned long mask, unsigned long set)
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
- .prot_pte = PROT_PTE_DEVICE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+ L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
.domain = DOMAIN_IO,
},
[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
- .prot_pte = PROT_PTE_DEVICE,
- .prot_pte_ext = PTE_EXT_TEX(2),
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
.domain = DOMAIN_IO,
},
[MT_DEVICE_CACHED] = { /* ioremap_cached */
- .prot_pte = PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
.domain = DOMAIN_IO,
},
- [MT_DEVICE_IXP2000] = { /* IXP2400 requires XCB=101 for on-chip I/O */
- .prot_pte = PROT_PTE_DEVICE,
+ [MT_DEVICE_WC] = { /* ioremap_wc */
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
- PMD_SECT_TEX(1),
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
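
In the hunk above, the IXP2000-specific device type gives way to a
generic MT_DEVICE_WC type backing ioremap_wc(). A hedged usage sketch
with a made-up device and addresses; example_map_fb() is hypothetical:

	#include <linux/errno.h>
	#include <linux/io.h>

	/* Hypothetical driver fragment: map a framebuffer write-combined. */
	static void __iomem *fb;

	static int example_map_fb(unsigned long phys, size_t len)
	{
		fb = ioremap_wc(phys, len);	/* backed by MT_DEVICE_WC */
		if (!fb)
			return -ENOMEM;
		memset_io(fb, 0, len);		/* stores may now be combined */
		return 0;
	}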
@@ -251,7 +250,7 @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- unsigned int user_pgprot, kern_pgprot;
+ unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -269,6 +268,20 @@ static void __init build_mem_type_table(void)
cachepolicy = CPOLICY_WRITEBACK;
ecc_mask = 0;
}
+#ifdef CONFIG_SMP
+ cachepolicy = CPOLICY_WRITEALLOC;
+#endif
+
+ /*
+ * On non-Xscale3 ARMv5-and-older systems, use CB=01
+ * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3
+ * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
+ * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+ */
+ if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+ mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
+ }
/*
* ARMv5 and lower, bit 4 must be set for page tables.
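
A worked example of the CB=01 versus TEXCB=00100 choice described in
the comment above, as a stand-alone program using the real ARM
section-descriptor bit positions (B is bit 2, TEX occupies bits 14..12):

	#include <stdio.h>

	#define PMD_SECT_BUFFERABLE	(1 << 2)	/* B bit of a section */
	#define PMD_SECT_TEX(x)		((x) << 12)	/* TEX field, bits 14..12 */

	int main(void)
	{
		unsigned int prot = PMD_SECT_BUFFERABLE; /* CB=01: pre-v6 WC */

		/* the xsc3/ARMv6+ path flips to TEXCB=00100: */
		prot |= PMD_SECT_TEX(1);
		prot &= ~PMD_SECT_BUFFERABLE;
		printf("%#x\n", prot);			/* prints 0x1000 */
		return 0;
	}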
@@ -290,7 +303,15 @@ static void __init build_mem_type_table(void)
}
cp = &cache_policies[cachepolicy];
- kern_pgprot = user_pgprot = cp->pte;
+ vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+ /*
+ * Only use write-through for non-SMP systems
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+ vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
/*
* Enable CPU-specific coherency if supported.
@@ -318,7 +339,6 @@ static void __init build_mem_type_table(void)
/*
* Mark the device area as "shared device"
*/
- mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
#ifdef CONFIG_SMP
@@ -327,30 +347,21 @@ static void __init build_mem_type_table(void)
*/
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
+ vecs_pgprot |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
}
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
- v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
- protection_map[i] = __pgprot(v);
+ protection_map[i] = __pgprot(v | user_pgprot);
}
- mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
- mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+ mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+ mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
- if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
- /*
- * Only use write-through for non-SMP systems
- */
- mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
- mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
- } else {
+ if (cpu_arch < CPU_ARCH_ARMv5)
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
- }
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
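
With the memory-type bits gone from the base protection_map[] entries,
the loop above can OR in user_pgprot directly instead of masking out
stale C/B bits first. The pgprot_kernel assignment composes the same
way; an illustrative expansion under the default writeback policy (the
hunk truncates the real permission list, so this is a sketch, not the
verbatim line):

	#include <asm/pgtable.h>

	/* Illustration only: base permission bits OR'd with the chosen
	 * policy's L_PTE_MT_* value (kern_pgprot == cp->pte here). */
	static pgprot_t example_kernel_prot(void)
	{
		return __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				L_PTE_DIRTY | L_PTE_WRITE |
				L_PTE_MT_WRITEBACK);
	}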
@@ -398,8 +409,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
- set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
- type->prot_pte_ext);
+ set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
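
With the memory type carried in the Linux PTE itself, alloc_init_pte()
no longer threads a per-type prot_pte_ext value through to
set_pte_ext(); the CPU-specific set_pte_ext() back end derives the
hardware TEX/C/B bits from the L_PTE_MT_* field instead. A rough C
model of that decode; the real ARMv6 version lives in assembly
(proc-macros.S), so this function is illustrative only:

	#include <asm/pgtable.h>

	/* Hypothetical C rendition of a set_pte_ext() back end's decode
	 * of the Linux PTE's memory-type field into hardware PTE bits. */
	static unsigned long hw_bits_from_linux_pte(unsigned long lpte)
	{
		switch (lpte & L_PTE_MT_MASK) {
		case L_PTE_MT_UNCACHED:
			return 0;				/* C=0 B=0 */
		case L_PTE_MT_WRITETHROUGH:
			return PTE_CACHEABLE;			/* C=1 B=0 */
		case L_PTE_MT_WRITEBACK:
			return PTE_CACHEABLE | PTE_BUFFERABLE;	/* C=1 B=1 */
		default:
			/* remaining L_PTE_MT_* values decode similarly */
			return 0;
		}
	}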