| author    | Paul Mundt <lethal@linux-sh.org> | 2006-12-25 19:28:54 +0900 |
|-----------|----------------------------------|---------------------------|
| committer | Paul Mundt <lethal@linux-sh.org> | 2007-02-13 10:54:45 +0900 |
| commit    | ea9af69481730e3d712104dfd549ba6c8ddd29f1 | |
| tree      | b6b0369a186f3df1b8061791c1cbc2e7fd161104 | |
| parent    | 11c1965687b0a472add948d4240dfe65a2fcb298 | |
sh: Local TLB flushing variants for SMP prep.
Rename the existing flush routines to local_ variants for use by
the IPI-backed global flush routines on SMP.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
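For context, the point of the local_ split is that once CONFIG_SMP is enabled, the generic flush_tlb_*() entry points can become IPI broadcasts that run the local variants on every CPU, while UP builds keep calling the local routines directly. The sketch below illustrates one way such a wrapper could look; it is not part of this patch. The four-argument smp_call_function() form matches kernels of this era, and flush_tlb_page_ipi() plus struct flush_tlb_page_args are made-up helpers for illustration only.

```c
/*
 * Illustrative sketch only -- not the code added by this patch.
 * Shows how an IPI-backed global flush_tlb_page() could be layered
 * on top of local_flush_tlb_page() under CONFIG_SMP.
 */
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <asm/tlbflush.h>

struct flush_tlb_page_args {
	struct vm_area_struct *vma;
	unsigned long page;
};

/* Runs on each remote CPU in IPI context. */
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_page_args *args = info;

	local_flush_tlb_page(args->vma, args->page);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct flush_tlb_page_args args = { .vma = vma, .page = page };

	preempt_disable();
	/* Ask the other CPUs to run the local flush, then do it here. */
	smp_call_function(flush_tlb_page_ipi, &args, 1, 1);
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
```

Note that the uniprocessor case stays zero-cost: the #else branch added to include/asm-sh/tlbflush.h below simply #defines the flush_tlb_*() names to their local_ counterparts, so non-SMP builds see no IPI machinery at all.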
| -rw-r--r-- | arch/sh/kernel/sh_ksyms.c  |  1 |
| -rw-r--r-- | arch/sh/mm/init.c          |  2 |
| -rw-r--r-- | arch/sh/mm/pg-sh4.c        |  4 |
| -rw-r--r-- | arch/sh/mm/tlb-flush.c     | 20 |
| -rw-r--r-- | arch/sh/mm/tlb-nommu.c     | 19 |
| -rw-r--r-- | arch/sh/mm/tlb-sh3.c       |  2 |
| -rw-r--r-- | arch/sh/mm/tlb-sh4.c       |  2 |
| -rw-r--r-- | include/asm-sh/tlbflush.h  | 38 |

8 files changed, 52 insertions, 36 deletions
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index e6106239a0fe..fe1b276c97c6 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -105,7 +105,6 @@ EXPORT_SYMBOL(__flush_purge_region);
 EXPORT_SYMBOL(clear_user_page);
 #endif
 
-EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(__down_trylock);
 
 #ifdef CONFIG_SMP
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d172065182fb..ae957a932375 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -106,7 +106,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 
-	__flush_tlb_page(get_asid(), addr);
+	flush_tlb_one(get_asid(), addr);
 }
 
 /*
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index b529d809dd4b..969efeceb928 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -39,7 +39,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), p3_addr);
+		flush_tlb_one(get_asid(), p3_addr);
 		local_irq_restore(flags);
 		update_mmu_cache(NULL, p3_addr, entry);
 		__clear_user_page((void *)p3_addr, to);
@@ -74,7 +74,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), p3_addr);
+		flush_tlb_one(get_asid(), p3_addr);
 		local_irq_restore(flags);
 		update_mmu_cache(NULL, p3_addr, entry);
 		__copy_user_page((void *)p3_addr, from, to);
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index b829c17c1d17..dcaf98e82be8 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -14,7 +14,7 @@
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -31,15 +31,15 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 			saved_asid = get_asid();
 			set_asid(asid);
 		}
-		__flush_tlb_page(asid, page);
+		flush_tlb_one(asid, page);
 		if (saved_asid != MMU_NO_ASID)
 			set_asid(saved_asid);
 		local_irq_restore(flags);
 	}
 }
 
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			   unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned int cpu = smp_processor_id();
@@ -67,7 +67,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			set_asid(asid);
 		}
 		while (start < end) {
-			__flush_tlb_page(asid, start);
+			flush_tlb_one(asid, start);
 			start += PAGE_SIZE;
 		}
 		if (saved_asid != MMU_NO_ASID)
@@ -77,7 +77,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
@@ -86,7 +86,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
+		local_flush_tlb_all();
 	} else {
 		unsigned long asid;
 		unsigned long saved_asid = get_asid();
@@ -97,7 +97,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		end &= PAGE_MASK;
 		set_asid(asid);
 		while (start < end) {
-			__flush_tlb_page(asid, start);
+			flush_tlb_one(asid, start);
 			start += PAGE_SIZE;
 		}
 		set_asid(saved_asid);
@@ -105,7 +105,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	local_irq_restore(flags);
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -122,7 +122,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	}
 }
 
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	unsigned long flags, status;
 
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index e55cfea01092..1ccca7c0532e 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -13,39 +13,33 @@
 /*
  * Nothing too terribly exciting here ..
  */
-
-void flush_tlb(void)
-{
-	BUG();
-}
-
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	BUG();
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	BUG();
 }
 
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		    unsigned long end)
 {
 	BUG();
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
 	BUG();
 }
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	BUG();
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	BUG();
 }
@@ -55,4 +49,3 @@ void update_mmu_cache(struct vm_area_struct * vma,
 {
 	BUG();
 }
-
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 598c998dba5c..e5e76eb7ee09 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -12,7 +12,7 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long addr, data;
 	int i, ways = MMU_NTLB_WAYS;
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 758d8dec622b..221e7095473d 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -12,7 +12,7 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long addr, data;
 
diff --git a/include/asm-sh/tlbflush.h b/include/asm-sh/tlbflush.h
index 28c073b0fbab..455fb8da441e 100644
--- a/include/asm-sh/tlbflush.h
+++ b/include/asm-sh/tlbflush.h
@@ -4,7 +4,6 @@
 /*
  * TLB flushing:
  *
- *  - flush_tlb() flushes the current mm struct TLBs
  *  - flush_tlb_all() flushes all processes TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
@@ -12,20 +11,45 @@
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_range(struct vm_area_struct *vma,
+				  unsigned long start,
+				  unsigned long end);
+extern void local_flush_tlb_page(struct vm_area_struct *vma,
+				 unsigned long page);
+extern void local_flush_tlb_kernel_range(unsigned long start,
+					 unsigned long end);
+extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
+
+#ifdef CONFIG_SMP
 
-extern void flush_tlb(void);
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-extern void __flush_tlb_page(unsigned long asid, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_one(unsigned long asid, unsigned long page);
+
+#else
+
+#define flush_tlb_all()			local_flush_tlb_all()
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
+#define flush_tlb_one(asid, page)	local_flush_tlb_one(asid, page)
+
+#define flush_tlb_range(vma, start, end)	\
+	local_flush_tlb_range(vma, start, end)
+
+#define flush_tlb_kernel_range(start, end)	\
+	local_flush_tlb_kernel_range(start, end)
+
+#endif /* CONFIG_SMP */
 
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
-{ /* Nothing to do */
+{
+	/* Nothing to do */
 }
-
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
 #endif /* __ASM_SH_TLBFLUSH_H */