| author | Guo Ren <ren_guo@c-sky.com> | 2018-09-05 14:25:10 +0800 |
|---|---|---|
| committer | Guo Ren <ren_guo@c-sky.com> | 2018-10-25 23:36:19 +0800 |
| commit | 00a9730e1007c6cc87a7c78af2f24a4105d616ee (patch) | |
| tree | c014e5a0606a7a88b6e3493f49862c040f9aeea8 /arch/csky/abiv1 | |
| parent | 4859bfca11c7d63d55175bcd85a75d6cee4b7184 (diff) | |
csky: Cache and TLB routines
This patch adds the cache and TLB sync code for abiv1 & abiv2.
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/csky/abiv1')
| -rw-r--r-- | arch/csky/abiv1/cacheflush.c | 52 |
| -rw-r--r-- | arch/csky/abiv1/inc/abi/cacheflush.h | 49 |

2 files changed, 101 insertions, 0 deletions
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
new file mode 100644
index 000000000000..10af8b6fe322
--- /dev/null
+++ b/arch/csky/abiv1/cacheflush.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/syscalls.h>
+#include <linux/spinlock.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	unsigned long addr;
+
+	if (mapping && !mapping_mapped(mapping)) {
+		set_bit(PG_arch_1, &(page)->flags);
+		return;
+	}
+
+	/*
+	 * We could delay the flush for the !page_mapping case too. But that
+	 * case is for exec env/arg pages and those are %99 certainly going to
+	 * get faulted into the tlb (and thus flushed) anyways.
+	 */
+	addr = (unsigned long) page_address(page);
+	dcache_wb_range(addr, addr + PAGE_SIZE);
+}
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t *pte)
+{
+	unsigned long addr;
+	struct page *page;
+	unsigned long pfn;
+
+	pfn = pte_pfn(*pte);
+	if (unlikely(!pfn_valid(pfn)))
+		return;
+
+	page = pfn_to_page(pfn);
+	addr = (unsigned long) page_address(page);
+
+	if (vma->vm_flags & VM_EXEC ||
+	    pages_do_alias(addr, address & PAGE_MASK))
+		cache_wbinv_all();
+
+	clear_bit(PG_arch_1, &(page)->flags);
+}
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
new file mode 100644
index 000000000000..5f663aef9b1b
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ABI_CSKY_CACHEFLUSH_H
+#define __ABI_CSKY_CACHEFLUSH_H
+
+#include <linux/compiler.h>
+#include <asm/string.h>
+#include <asm/cache.h>
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *);
+
+#define flush_cache_mm(mm)			cache_wbinv_all()
+#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
+#define flush_cache_dup_mm(mm)			cache_wbinv_all()
+
+/*
+ * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
+ * Use cache_wbinv_all() here and need to be improved in future.
+ */
+#define flush_cache_range(vma, start, end)	cache_wbinv_all()
+#define flush_cache_vmap(start, end)		cache_wbinv_range(start, end)
+#define flush_cache_vunmap(start, end)		cache_wbinv_range(start, end)
+
+#define flush_icache_page(vma, page)		cache_wbinv_all()
+#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+				cache_wbinv_range(adr, adr + len)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+	cache_wbinv_all(); \
+	memcpy(dst, src, len); \
+	cache_wbinv_all(); \
+} while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+	cache_wbinv_all(); \
+	memcpy(dst, src, len); \
+	cache_wbinv_all(); \
+} while (0)
+
+#define flush_dcache_mmap_lock(mapping)		do {} while (0)
+#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
+
+#endif /* __ABI_CSKY_CACHEFLUSH_H */
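The update_mmu_cache() routine above writes back and invalidates the whole cache whenever a page is executable or whenever its kernel and user mappings could alias in the VIPT data cache. As an editorial aside (not part of this commit), the standalone sketch below illustrates the kind of test a pages_do_alias()-style helper performs; the PAGE_SHIFT and SHMLBA values, the addresses, and the file name are assumptions for illustration only, not taken from the csky sources.

```c
/* alias_sketch.c - hypothetical illustration, not kernel code */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define SHMLBA		(4 * PAGE_SIZE)		/* assumed cache colour span */

/*
 * Two virtual mappings of the same physical page alias in a VIPT cache
 * when their index bits (above the page offset, below SHMLBA) differ.
 */
#define pages_do_alias(a, b)	((((a) ^ (b)) & (SHMLBA - 1)) & PAGE_MASK)

int main(void)
{
	unsigned long kva = 0xc0001000UL;	/* kernel mapping of a page */
	unsigned long uva = 0x00403000UL;	/* user mapping of the same page */

	/* Different cache colour means the two mappings can hold stale data. */
	printf("aliases: %s\n", pages_do_alias(kva, uva) ? "yes" : "no");
	return 0;
}
```

When the check fires (or the mapping is executable), the patch falls back to cache_wbinv_all() rather than a ranged flush, which matches the comment in cacheflush.h that cache_wbinv_range() is not yet reliable when current_mm differs from vma->mm.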