Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c   | 147
-rw-r--r--  arch/mips/mm/cache.c   |   4
-rw-r--r--  arch/mips/mm/init.c    |  12
-rw-r--r--  arch/mips/mm/sc-mips.c |   2
-rw-r--r--  arch/mips/mm/tlb-r4k.c |   5
-rw-r--r--  arch/mips/mm/tlbex.c   |   6
6 files changed, 160 insertions(+), 16 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index c14259edd53f..1c74a6ad072a 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -57,7 +57,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
preempt_enable();
}
-#if defined(CONFIG_MIPS_CMP)
+#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
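With CONFIG_MIPS_CPS the same reasoning applies as for CONFIG_MIPS_CMP: index-type cache ops act on whatever line happens to sit at a given way/set of the local core's cache, so on a coherent multi-core system they can evict lines owned by another core. A condensed sketch of how the flag is consumed elsewhere in this file (the wrapper name here is illustrative; cpu_has_safe_index_cacheops, dcache_size, r4k_blast_dcache and blast_dcache_range are the real symbols):

	/* Sketch: index-based whole-cache blasts only run when safe;
	 * otherwise fall back to address-based (Hit_*) range ops.
	 */
	static void dma_cache_wback_inv_sketch(unsigned long addr, unsigned long size)
	{
		if (cpu_has_safe_index_cacheops && size >= dcache_size)
			r4k_blast_dcache();		/* whole cache, by index */
		else
			blast_dcache_range(addr, addr + size);	/* by address */
	}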
@@ -123,6 +123,28 @@ static void r4k_blast_dcache_page_setup(void)
r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}
+#ifndef CONFIG_EVA
+#define r4k_blast_dcache_user_page r4k_blast_dcache_page
+#else
+
+static void (*r4k_blast_dcache_user_page)(unsigned long addr);
+
+static void r4k_blast_dcache_user_page_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 0)
+ r4k_blast_dcache_user_page = (void *)cache_noop;
+ else if (dc_lsize == 16)
+ r4k_blast_dcache_user_page = blast_dcache16_user_page;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache_user_page = blast_dcache32_user_page;
+ else if (dc_lsize == 64)
+ r4k_blast_dcache_user_page = blast_dcache64_user_page;
+}
+
+#endif
+
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
static void r4k_blast_dcache_page_indexed_setup(void)
@@ -245,6 +267,27 @@ static void r4k_blast_icache_page_setup(void)
r4k_blast_icache_page = blast_icache64_page;
}
+#ifndef CONFIG_EVA
+#define r4k_blast_icache_user_page r4k_blast_icache_page
+#else
+
+static void (*r4k_blast_icache_user_page)(unsigned long addr);
+
+static void r4k_blast_icache_user_page_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache_user_page = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache_user_page = blast_icache16_user_page;
+ else if (ic_lsize == 32)
+ r4k_blast_icache_user_page = blast_icache32_user_page;
+ else if (ic_lsize == 64)
+ r4k_blast_icache_user_page = blast_icache64_user_page;
+}
+
+#endif
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
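The blast_{d,i}cacheNN_user_page helpers selected above come from asm/r4kcache.h; under EVA they must reach user pages through the user address segments, which means using the user-mode cache instruction (CACHEE) rather than plain CACHE. A rough sketch of the shape of one such helper, assuming the macro-generated style of the existing blast_*_page functions (the per-line helper name below is hypothetical, not the exact one in the header):

	/* Illustrative only: the real helpers are macro-generated in
	 * arch/mips/include/asm/r4kcache.h. The key difference from
	 * blast_dcache16_page() is that each line is touched with the
	 * EVA user-mode cache op so the user segment mapping applies.
	 */
	static inline void blast_dcache16_user_page(unsigned long page)
	{
		unsigned long start = page;
		unsigned long end = start + PAGE_SIZE;

		do {
			/* Hit_Writeback_Inv_D via CACHEE on a user address */
			flush_dcache_line_user(start);	/* hypothetical helper */
			start += 16;
		} while (start < end);
	}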
@@ -355,6 +398,7 @@ static inline void local_r4k___flush_cache_all(void * args)
{
switch (current_cpu_type()) {
case CPU_LOONGSON2:
+ case CPU_LOONGSON3:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -519,7 +563,8 @@ static inline void local_r4k_flush_cache_page(void *args)
}
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
- r4k_blast_dcache_page(addr);
+ vaddr ? r4k_blast_dcache_page(addr) :
+ r4k_blast_dcache_user_page(addr);
if (exec && !cpu_icache_snoops_remote_store)
r4k_blast_scache_page(addr);
}
@@ -530,7 +575,8 @@ static inline void local_r4k_flush_cache_page(void *args)
if (cpu_context(cpu, mm) != 0)
drop_mmu_context(mm, cpu);
} else
- r4k_blast_icache_page(addr);
+ vaddr ? r4k_blast_icache_page(addr) :
+ r4k_blast_icache_user_page(addr);
}
if (vaddr) {
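The ternaries above pick a flush routine based on how the page was reached: when the function had to set up a temporary kernel mapping, vaddr is non-NULL and addr is a kernel address, so the ordinary blast applies; when the page is still mapped in the current process, addr is a user virtual address and, under EVA, needs the user-mode ops. A minimal sketch of that dispatch (wrapper name illustrative):

	/* Without CONFIG_EVA the user variant is #defined to the kernel
	 * one, so this collapses to a single call.
	 */
	static inline void blast_dcache_kern_or_user(unsigned long addr, void *vaddr)
	{
		if (vaddr)	/* temporary kernel mapping (kmap) */
			r4k_blast_dcache_page(addr);
		else		/* live user mapping: EVA user-mode cache ops */
			r4k_blast_dcache_user_page(addr);
	}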
@@ -595,6 +641,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
break;
}
}
+#ifdef CONFIG_EVA
+ /*
+ * Due to all possible segment mappings, there might be cache aliases
+ * caused by the bootloader being in non-EVA mode and the CPU switching
+ * to EVA during early kernel init. It's best to flush the scache
+ * to avoid secondary cores fetching stale data and crashing the
+ * kernel.
+ */
+ bc_wback_inv(start, (end - start));
+ __sync();
+#endif
}
static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -617,7 +674,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
instruction_hazard();
}
-#ifdef CONFIG_DMA_NONCOHERENT
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
@@ -688,7 +745,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
bc_inv(addr, size);
__sync();
}
-#endif /* CONFIG_DMA_NONCOHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
/*
* While we're protected against bad userland addresses we don't care
@@ -1010,6 +1067,33 @@ static void probe_pcache(void)
c->dcache.waybit = 0;
break;
+ case CPU_LOONGSON3:
+ config1 = read_c0_config1();
+ lsize = (config1 >> 19) & 7;
+ if (lsize)
+ c->icache.linesz = 2 << lsize;
+ else
+ c->icache.linesz = 0;
+ c->icache.sets = 64 << ((config1 >> 22) & 7);
+ c->icache.ways = 1 + ((config1 >> 16) & 7);
+ icache_size = c->icache.sets *
+ c->icache.ways *
+ c->icache.linesz;
+ c->icache.waybit = 0;
+
+ lsize = (config1 >> 10) & 7;
+ if (lsize)
+ c->dcache.linesz = 2 << lsize;
+ else
+ c->dcache.linesz = 0;
+ c->dcache.sets = 64 << ((config1 >> 13) & 7);
+ c->dcache.ways = 1 + ((config1 >> 7) & 7);
+ dcache_size = c->dcache.sets *
+ c->dcache.ways *
+ c->dcache.linesz;
+ c->dcache.waybit = 0;
+ break;
+
default:
if (!(config & MIPS_CONF_M))
panic("Don't know how to probe P-caches on this cpu.");
@@ -1113,13 +1197,21 @@ static void probe_pcache(void)
case CPU_34K:
case CPU_74K:
case CPU_1004K:
+ case CPU_1074K:
case CPU_INTERAPTIV:
+ case CPU_P5600:
case CPU_PROAPTIV:
- if (current_cpu_type() == CPU_74K)
+ case CPU_M5150:
+ if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K))
alias_74k_erratum(c);
- if ((read_c0_config7() & (1 << 16))) {
- /* effectively physically indexed dcache,
- thus no virtual aliases. */
+ if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
+ (c->icache.waysize > PAGE_SIZE))
+ c->icache.flags |= MIPS_CACHE_ALIASES;
+ if (read_c0_config7() & MIPS_CONF7_AR) {
+ /*
+ * Effectively physically indexed dcache,
+ * thus no virtual aliases.
+ */
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
}
@@ -1239,6 +1331,33 @@ static void __init loongson2_sc_init(void)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
+static void __init loongson3_sc_init(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config2, lsize;
+
+ config2 = read_c0_config2();
+ lsize = (config2 >> 4) & 15;
+ if (lsize)
+ c->scache.linesz = 2 << lsize;
+ else
+ c->scache.linesz = 0;
+ c->scache.sets = 64 << ((config2 >> 8) & 15);
+ c->scache.ways = 1 + (config2 & 15);
+
+ scache_size = c->scache.sets *
+ c->scache.ways *
+ c->scache.linesz;
+ /* Loongson-3 has 4 cores, 1MB scache for each; the scaches are shared */
+ scache_size *= 4;
+ c->scache.waybit = 0;
+ pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+ if (scache_size)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+ return;
+}
+
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
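loongson3_sc_init() above follows the analogous Config2 layout (SS at bits 11:8, SL at 7:4, SA at 3:0), then multiplies by four because the per-core probe only sees the current core's 1MB slice of the shared scache. Worked numbers under a hypothetical Config2 encoding:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical Config2: SS=5 (64<<5 = 2048 sets), SL=4
		 * (2<<4 = 32-byte lines), SA=15 (16 ways):
		 * 2048 * 16 * 32 = 1 MiB per core.
		 */
		unsigned int config2 = (5u << 8) | (4u << 4) | 15u;
		unsigned long sets  = 64ul << ((config2 >> 8) & 15);
		unsigned long lsize = 2ul << ((config2 >> 4) & 15);
		unsigned long ways  = 1 + (config2 & 15);
		unsigned long scache_size = sets * ways * lsize * 4;	/* 4 cores */

		printf("scache: %lu KiB total\n", scache_size >> 10);	/* 4096 */
		return 0;
	}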
@@ -1291,6 +1410,10 @@ static void setup_scache(void)
loongson2_sc_init();
return;
+ case CPU_LOONGSON3:
+ loongson3_sc_init();
+ return;
+
case CPU_XLP:
/* don't need to worry about L2, fully coherent */
return;
@@ -1461,6 +1584,10 @@ void r4k_cache_init(void)
r4k_blast_scache_page_setup();
r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup();
+#ifdef CONFIG_EVA
+ r4k_blast_dcache_user_page_setup();
+ r4k_blast_icache_user_page_setup();
+#endif
/*
* Some MIPS32 and MIPS64 processors have physically indexed caches.
@@ -1492,7 +1619,7 @@ void r4k_cache_init(void)
flush_icache_range = r4k_flush_icache_range;
local_flush_icache_range = local_r4k_flush_icache_range;
-#if defined(CONFIG_DMA_NONCOHERENT)
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
if (coherentio) {
_dma_cache_wback_inv = (void *)cache_noop;
_dma_cache_wback = (void *)cache_noop;
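The hunk is cut off just above; with CONFIG_DMA_MAYBE_COHERENT the coherence decision moves from compile time to boot time: when coherentio is set, the DMA cache hooks become no-ops, otherwise they keep pointing at the r4k routines. A sketch of how the full if/else is expected to read in context (the else branch is reconstructed, not part of the diff):

	if (coherentio) {
		_dma_cache_wback_inv = (void *)cache_noop;
		_dma_cache_wback = (void *)cache_noop;
		_dma_cache_inv = (void *)cache_noop;
	} else {
		_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
		_dma_cache_wback = r4k_dma_cache_wback_inv;
		_dma_cache_inv = r4k_dma_cache_inv;
	}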
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index fde7e56d13fe..e422b38d3113 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
-#ifdef CONFIG_DMA_NONCOHERENT
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
@@ -58,7 +58,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size);
EXPORT_SYMBOL(_dma_cache_wback_inv);
-#endif /* CONFIG_DMA_NONCOHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
/*
* We could optimize the case where the cache argument is not BCACHE but
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 6b59617760c1..4fc74c78265a 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -422,10 +422,20 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
+void (*free_init_pages_eva)(void *begin, void *end) = NULL;
+
void __init_refok free_initmem(void)
{
prom_free_prom_memory();
- free_initmem_default(POISON_FREE_INITMEM);
+ /*
+ * Let the platform define its own function for freeing the init
+ * section, since under EVA any possible virtual-to-physical
+ * mapping may have been used for it.
+ */
+ if (free_init_pages_eva)
+ free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
+ else
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
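A platform that needs this hook would be expected to install it during early setup, before free_initmem() runs. A minimal sketch, assuming the generic free_init_pages() helper from this file; the platform function name and registration point are illustrative:

	/* Hypothetical platform code: under EVA the init section's
	 * virtual-to-physical mapping need not be the usual KSEG0 offset,
	 * so the platform translates the bounds itself.
	 */
	static void my_free_init_pages_eva(void *begin, void *end)
	{
		free_init_pages("unused kernel", __pa_symbol(begin),
				__pa_symbol(end));
	}

	void __init my_plat_setup(void)	/* illustrative call site */
	{
		free_init_pages_eva = my_free_init_pages_eva;
	}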
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 7a56aee5fce7..99eb8fabab60 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -76,8 +76,10 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
case CPU_34K:
case CPU_74K:
case CPU_1004K:
+ case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
+ case CPU_P5600:
case CPU_BMIPS5000:
if (config2 & (1 << 12))
return 0;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index ae4ca2450707..eeaf50f5df2b 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -48,13 +48,14 @@ extern void build_tlb_refill_handler(void);
#endif /* CONFIG_MIPS_MT_SMTC */
/*
- * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
- * unfortrunately, itlb is not totally transparent to software.
+ * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
+ * unfortunately, itlb is not totally transparent to software.
*/
static inline void flush_itlb(void)
{
switch (current_cpu_type()) {
case CPU_LOONGSON2:
+ case CPU_LOONGSON3:
write_c0_diag(4);
break;
default:
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b234b1b5ccad..ee88367ab3ad 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -509,7 +509,10 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
switch (current_cpu_type()) {
case CPU_M14KC:
case CPU_74K:
+ case CPU_1074K:
case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_M5150:
break;
default:
@@ -579,6 +582,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_BMIPS4380:
case CPU_BMIPS5000:
case CPU_LOONGSON2:
+ case CPU_LOONGSON3:
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
@@ -621,7 +625,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
default:
panic("No TLB refill handler yet (CPU type: %d)",
- current_cpu_data.cputype);
+ current_cpu_type());
break;
}
}