author		Nicholas Piggin <npiggin@gmail.com>	2018-11-07 10:35:34 +0800
committer	Ley Foon Tan <ley.foon.tan@intel.com>	2019-03-07 05:29:35 +0800
commit		3ac23944de570df7a6309425aeef063be38f37c4
tree		7c5a3a2fd805045dfbd4eaa507d25dbc89d9cd1c
parent		b6a10463438d8775aa6aa09ece46e8af14345712
nios2: update_mmu_cache preload the TLB with the new PTE
Rather than flushing the TLB entry when installing a new PTE and relying
on the fast TLB miss handler to re-fill the TLB later, refill the TLB
entry with the new PTE at the point where the old entry is removed.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Ley Foon Tan <ley.foon.tan@intel.com>
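
[Editor's note, not part of the patch] For illustration only, a minimal,
self-contained C sketch of the idea using a toy software-managed TLB.
All names here (tlb_entry, flush_entry, reload_entry, lookup) are invented
for this sketch and are not the nios2 structures or helpers. The old
behaviour invalidates the entry and pays a TLB miss on the next access;
the new behaviour overwrites the entry with the new PTE so the next
access hits.

#include <stdio.h>
#include <stdbool.h>

/* Toy TLB slot: {virtual address, PTE value, valid}. */
struct tlb_entry { unsigned long vaddr; unsigned long pte; bool valid; };
static struct tlb_entry tlb[4];
static unsigned long misses;

/* Old style: drop the entry; the next access takes a TLB miss. */
static void flush_entry(unsigned long vaddr)
{
	for (int i = 0; i < 4; i++)
		if (tlb[i].valid && tlb[i].vaddr == vaddr)
			tlb[i].valid = false;
}

/* New style: overwrite the stale entry with the new translation in place. */
static void reload_entry(unsigned long vaddr, unsigned long pte)
{
	for (int i = 0; i < 4; i++)
		if (tlb[i].valid && tlb[i].vaddr == vaddr)
			tlb[i].pte = pte;
}

/* Translate vaddr; on a miss, refill slot 0 from the "page table" value. */
static unsigned long lookup(unsigned long vaddr, unsigned long pte_in_page_table)
{
	for (int i = 0; i < 4; i++)
		if (tlb[i].valid && tlb[i].vaddr == vaddr)
			return tlb[i].pte;
	misses++;
	tlb[0] = (struct tlb_entry){ vaddr, pte_in_page_table, true };
	return pte_in_page_table;
}

int main(void)
{
	lookup(0x1000, 0xaa);		/* initial fill: miss #1 */
	flush_entry(0x1000);		/* old behaviour after a PTE update ... */
	lookup(0x1000, 0xbb);		/* ... costs miss #2 on the next access */
	reload_entry(0x1000, 0xcc);	/* new behaviour: preload the new PTE */
	lookup(0x1000, 0xcc);		/* ... and the next access hits */
	printf("misses taken: %lu\n", misses);	/* prints 2; a flush here would make it 3 */
	return 0;
}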
-rw-r--r--	arch/nios2/include/asm/tlbflush.h	 6
-rw-r--r--	arch/nios2/mm/cacheflush.c	 7
-rw-r--r--	arch/nios2/mm/tlb.c	32
3 files changed, 36 insertions, 9 deletions
diff --git a/arch/nios2/include/asm/tlbflush.h b/arch/nios2/include/asm/tlbflush.h
index 982a7ecf221f..b4bf487b9832 100644
--- a/arch/nios2/include/asm/tlbflush.h
+++ b/arch/nios2/include/asm/tlbflush.h
@@ -30,6 +30,9 @@ struct mm_struct;
  * - flush_tlb_page(vma, address) flushes a page
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  * - flush_tlb_kernel_page(address) flushes a kernel page
+ *
+ * - reload_tlb_page(vma, address, pte) flushes the TLB for address like
+ *   flush_tlb_page, then replaces it with a TLB for pte.
  */
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
@@ -48,4 +51,7 @@ static inline void flush_tlb_kernel_page(unsigned long address)
 	flush_tlb_kernel_range(address, address + PAGE_SIZE);
 }
 
+extern void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr,
+			    pte_t pte);
+
 #endif /* _ASM_NIOS2_TLBFLUSH_H */
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index d58e7e80dc0d..65de1bd6a760 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -198,13 +198,14 @@ void flush_dcache_page(struct page *page)
 EXPORT_SYMBOL(flush_dcache_page);
 
 void update_mmu_cache(struct vm_area_struct *vma,
-		      unsigned long address, pte_t *pte)
+		      unsigned long address, pte_t *ptep)
 {
-	unsigned long pfn = pte_pfn(*pte);
+	pte_t pte = *ptep;
+	unsigned long pfn = pte_pfn(pte);
 	struct page *page;
 	struct address_space *mapping;
 
-	flush_tlb_page(vma, address);
+	reload_tlb_page(vma, address, pte);
 
 	if (!pfn_valid(pfn))
 		return;
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
index 2e49993d29ef..af8711885569 100644
--- a/arch/nios2/mm/tlb.c
+++ b/arch/nios2/mm/tlb.c
@@ -43,13 +43,11 @@ static unsigned long pteaddr_invalid(unsigned long addr)
  * This one is only used for pages with the global bit set so we don't care
  * much about the ASID.
  */
-void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+static void replace_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, unsigned long tlbacc)
 {
 	unsigned int way;
 	unsigned long org_misc, pid_misc;
 
-	pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
-
 	/* remember pid/way until we return. */
 	get_misc_and_pid(&org_misc, &pid_misc);
 
@@ -72,10 +70,11 @@ void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
 		if (pid != mmu_pid)
 			continue;
 
-		tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+		tlbmisc = mmu_pid | TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
 		WRCTL(CTL_TLBMISC, tlbmisc);
-		WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
-		WRCTL(CTL_TLBACC, 0);
+		if (tlbacc == 0)
+			WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
+		WRCTL(CTL_TLBACC, tlbacc);
 		/*
 		 * There should be only a single entry that maps a
 		 * particular {address,pid} so break after a match.
@@ -86,6 +85,20 @@ void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
 	WRCTL(CTL_TLBMISC, org_misc);
 }
 
+static void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+{
+	pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
+
+	replace_tlb_one_pid(addr, mmu_pid, 0);
+}
+
+static void reload_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, pte_t pte)
+{
+	pr_debug("Reload tlb-entry for vaddr=%#lx\n", addr);
+
+	replace_tlb_one_pid(addr, mmu_pid, pte_val(pte));
+}
+
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
@@ -97,6 +110,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
+void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
+
+	reload_tlb_one_pid(addr, mmu_pid, pte);
+}
+
 /*
  * This one is only used for pages with the global bit set so we don't care
  * much about the ASID.