Diffstat (limited to 'arch/mips/mm')
-rw-r--r--   arch/mips/mm/c-r3k.c   |  6
-rw-r--r--   arch/mips/mm/c-r4k.c   | 33
-rw-r--r--   arch/mips/mm/highmem.c |  1
-rw-r--r--   arch/mips/mm/init.c    |  1
-rw-r--r--   arch/mips/mm/page.c    | 71
-rw-r--r--   arch/mips/mm/sc-rm7k.c |  4
-rw-r--r--   arch/mips/mm/tlbex.c   |  3
7 files changed, 77 insertions, 42 deletions
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 76935e320214..27a5b466c85c 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -26,7 +26,7 @@
 static unsigned long icache_size, dcache_size;		/* Size in bytes */
 static unsigned long icache_lsize, dcache_lsize;	/* Size in bytes */
 
-unsigned long __init r3k_cache_size(unsigned long ca_flags)
+unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
 {
 	unsigned long flags, status, dummy, size;
 	volatile unsigned long *p;
@@ -61,7 +61,7 @@ unsigned long __init r3k_cache_size(unsigned long ca_flags)
 	return size * sizeof(*p);
 }
 
-unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
+unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
 {
 	unsigned long flags, status, lsize, i;
 	volatile unsigned long *p;
@@ -90,7 +90,7 @@ unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
 	return lsize * sizeof(*p);
 }
 
-static void __init r3k_probe_cache(void)
+static void __cpuinit r3k_probe_cache(void)
 {
 	dcache_size = r3k_cache_size(ST0_ISC);
 	if (dcache_size)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 643c8bcffff3..27096751ddce 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -446,6 +446,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 	struct page *page = pfn_to_page(fcp_args->pfn);
 	int exec = vma->vm_flags & VM_EXEC;
 	struct mm_struct *mm = vma->vm_mm;
+	int map_coherent = 0;
 	pgd_t *pgdp;
 	pud_t *pudp;
 	pmd_t *pmdp;
@@ -479,7 +480,9 @@ static inline void local_r4k_flush_cache_page(void *args)
 		 * Use kmap_coherent or kmap_atomic to do flushes for
 		 * another ASID than the current one.
 		 */
-		if (cpu_has_dc_aliases)
+		map_coherent = (cpu_has_dc_aliases &&
+				page_mapped(page) && !Page_dcache_dirty(page));
+		if (map_coherent)
 			vaddr = kmap_coherent(page, addr);
 		else
 			vaddr = kmap_atomic(page, KM_USER0);
@@ -502,7 +505,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 	}
 
 	if (vaddr) {
-		if (cpu_has_dc_aliases)
+		if (map_coherent)
 			kunmap_coherent();
 		else
 			kunmap_atomic(vaddr, KM_USER0);
@@ -1226,6 +1229,28 @@ void au1x00_fixup_config_od(void)
 	}
 }
 
+/* CP0 hazard avoidance. */
+#define NXP_BARRIER()						\
+	 __asm__ __volatile__(					\
+	".set noreorder\n\t"					\
+	"nop; nop; nop; nop; nop; nop;\n\t"			\
+	".set reorder\n\t")
+
+static void nxp_pr4450_fixup_config(void)
+{
+	unsigned long config0;
+
+	config0 = read_c0_config();
+
+	/* clear all three cache coherency fields */
+	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
+	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) |
+		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
+		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
+	write_c0_config(config0);
+	NXP_BARRIER();
+}
+
 static int __cpuinitdata cca = -1;
 
 static int __init cca_setup(char *str)
@@ -1271,6 +1296,10 @@ static void __cpuinit coherency_setup(void)
 	case CPU_AU1500: /* rev. AB */
 		au1x00_fixup_config_od();
 		break;
+
+	case PRID_IMP_PR4450:
+		nxp_pr4450_fixup_config();
+		break;
 	}
 }
 
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 10dd2af2343b..8f2cd8eda741 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -116,4 +116,3 @@ EXPORT_SYMBOL(__kmap);
 EXPORT_SYMBOL(__kunmap);
 EXPORT_SYMBOL(__kmap_atomic);
 EXPORT_SYMBOL(__kunmap_atomic);
-EXPORT_SYMBOL(__kmap_atomic_to_page);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index ecd562d2c348..137c14bafd6b 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -71,6 +71,7 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
  * don't have to care about aliases on other CPUs.
  */
 unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL_GPL(empty_zero_page);
 
 /*
  * Not static inline because used by IP27 special magic initialization code
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index d827d6144369..1417c6494858 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -235,13 +235,12 @@ static void __cpuinit set_prefetch_parameters(void)
 	}
 	/*
 	 * Too much unrolling will overflow the available space in
-	 * clear_space_array / copy_page_array. 8 words sounds generous,
-	 * but a R4000 with 128 byte L2 line length can exceed even that.
+	 * clear_space_array / copy_page_array.
 	 */
-	half_clear_loop_size = min(8 * clear_word_size,
+	half_clear_loop_size = min(16 * clear_word_size,
 				   max(cache_line_size >> 1,
 				       4 * clear_word_size));
-	half_copy_loop_size = min(8 * copy_word_size,
+	half_copy_loop_size = min(16 * copy_word_size,
 				  max(cache_line_size >> 1,
 				      4 * copy_word_size));
 }
@@ -263,21 +262,23 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
 	if (pref_bias_clear_store) {
 		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
 			    A0);
-	} else if (cpu_has_cache_cdex_s) {
-		uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
-	} else if (cpu_has_cache_cdex_p) {
-		if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-		}
+	} else if (cache_line_size == (half_clear_loop_size << 1)) {
+		if (cpu_has_cache_cdex_s) {
+			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+		} else if (cpu_has_cache_cdex_p) {
+			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+			}
 
-		if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-			uasm_i_lw(buf, ZERO, ZERO, AT);
+			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+				uasm_i_lw(buf, ZERO, ZERO, AT);
 
-		uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
-	}
+			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+		}
+	}
 }
 
 void __cpuinit build_clear_page(void)
@@ -310,8 +311,8 @@ void __cpuinit build_clear_page(void)
 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
 		uasm_i_lui(&buf, AT, 0xa000);
 
-	off = min(8, pref_bias_clear_store / cache_line_size) *
-	      cache_line_size;
+	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
+				* cache_line_size : 0;
 	while (off) {
 		build_clear_pref(&buf, -off);
 		off -= cache_line_size;
@@ -403,20 +404,22 @@ static inline void build_copy_store_pref(u32 **buf, int off)
 	if (pref_bias_copy_store) {
 		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
 			    A0);
-	} else if (cpu_has_cache_cdex_s) {
-		uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
-	} else if (cpu_has_cache_cdex_p) {
-		if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-		}
+	} else if (cache_line_size == (half_copy_loop_size << 1)) {
+		if (cpu_has_cache_cdex_s) {
+			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+		} else if (cpu_has_cache_cdex_p) {
+			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+			}
 
-		if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-			uasm_i_lw(buf, ZERO, ZERO, AT);
+			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+				uasm_i_lw(buf, ZERO, ZERO, AT);
 
-		uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+		}
 	}
 }
 
@@ -454,12 +457,14 @@ void __cpuinit build_copy_page(void)
 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
 		uasm_i_lui(&buf, AT, 0xa000);
 
-	off = min(8, pref_bias_copy_load / cache_line_size) * cache_line_size;
+	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
+				cache_line_size : 0;
 	while (off) {
 		build_copy_load_pref(&buf, -off);
 		off -= cache_line_size;
 	}
-	off = min(8, pref_bias_copy_store / cache_line_size) * cache_line_size;
+	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
+				cache_line_size : 0;
 	while (off) {
 		build_copy_store_pref(&buf, -off);
 		off -= cache_line_size;
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index fc227f3b1199..e3abfb2d7e86 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -86,7 +86,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
 /*
  * This function is executed in uncached address space.
  */
-static __init void __rm7k_sc_enable(void)
+static __cpuinit void __rm7k_sc_enable(void)
 {
 	int i;
 
@@ -107,7 +107,7 @@ static __init void __rm7k_sc_enable(void)
 	}
 }
 
-static __init void rm7k_sc_enable(void)
+static __cpuinit void rm7k_sc_enable(void)
 {
 	if (read_c0_config() & RM7K_CONF_SE)
 		return;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 382738ca8a0b..76da73a5ab3c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -224,8 +224,9 @@ static u32 final_handler[64] __cpuinitdata;
 static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
 {
 	switch (current_cpu_type()) {
-	/* Found by experiment: R4600 v2.0 needs this, too. */
+	/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
 	case CPU_R4600:
+	case CPU_R4700:
 	case CPU_R5000:
 	case CPU_R5000A:
 	case CPU_NEVADA: