-rw-r--r--  arch/arm/mm/flush.c           52
-rw-r--r--  include/asm-arm/cacheflush.h   7
2 files changed, 58 insertions, 1 deletion
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 191788fb18d1..b0208c992576 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -16,6 +16,58 @@
#include <asm/tlbflush.h>
#ifdef CONFIG_CPU_CACHE_VIPT
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+	if (cache_is_vivt()) {
+		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+			__cpuc_flush_user_all();
+		return;
+	}
+
+	if (cache_is_vipt_aliasing()) {
+		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
+		"	mcr	p15, 0, %0, c7, c5, 0\n"
+		"	mcr	p15, 0, %0, c7, c10, 4"
+		    :
+		    : "r" (0)
+		    : "cc");
+	}
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+	if (cache_is_vivt()) {
+		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
+						vma->vm_flags);
+		return;
+	}
+
+	if (cache_is_vipt_aliasing()) {
+		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
+		"	mcr	p15, 0, %0, c7, c5, 0\n"
+		"	mcr	p15, 0, %0, c7, c10, 4"
+		    :
+		    : "r" (0)
+		    : "cc");
+	}
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+{
+	if (cache_is_vivt()) {
+		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+			unsigned long addr = user_addr & PAGE_MASK;
+			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+		}
+		return;
+	}
+
+	if (cache_is_vipt_aliasing())
+		flush_pfn_alias(pfn, user_addr);
+}
+
#define ALIAS_FLUSH_START	0xffff4000
#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 035cdcff43d2..e81baff4f54b 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -256,7 +256,7 @@ extern void dmac_flush_range(unsigned long, unsigned long);
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()
-
+#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
@@ -279,6 +279,11 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}
+#else
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+#endif

/*
 * flush_cache_user_range is used when we want to ensure that the