author    | Paul Mundt <lethal@linux-sh.org> | 2010-12-01 15:39:51 +0900
committer | Paul Mundt <lethal@linux-sh.org> | 2010-12-01 15:39:51 +0900
commit    | 55661fc1f105ed75852e937bf8ea408270eb0cca (patch)
tree      | aa96c0c6dd0a8230f9373cab32cd069564d27d23 /arch/sh/mm/cache.c
parent    | 22a5b566c8c442b0b35b3b106795e2f2b3578096 (diff)
sh: Assume new page cache pages have dirty dcache lines.
This follows the ARM change c01778001a4f5ad9c62d882776235f3f31922fdd
("ARM: 6379/1: Assume new page cache pages have dirty D-cache") for the
same rationale:
> There are places in Linux where writes to newly allocated page
> cache pages happen without a subsequent call to flush_dcache_page()
> (several PIO drivers including USB HCD). This patch changes the
> meaning of PG_arch_1 to be PG_dcache_clean and always flush the
> D-cache for a newly mapped page in update_mmu_cache().
This addresses issues seen with executing binaries from MMC, in
addition to some of the other HCDs that don't explicitly do cache
management for their pipe-in buffers.
Requested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
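The polarity matters because the flags word of a freshly allocated struct page is zeroed, so whatever the zero state encodes is what every new page cache page starts with. Under PG_dcache_dirty, zero meant "clean" and a flush could be skipped for a page a PIO driver had in fact written; under PG_dcache_clean, zero means "dirty", so the first mapping always flushes. A minimal userspace sketch of that idiom (the bitop below is a simplified stand-in for the kernel's test_and_set_bit(), not the real implementation):

```c
#include <stdio.h>

/* Simplified stand-in for the kernel's test_and_set_bit() (illustration only). */
static int test_and_set_bit(int nr, unsigned long *flags)
{
	int old = (*flags >> nr) & 1;
	*flags |= 1UL << nr;
	return old;
}

#define PG_dcache_clean 0	/* bit position is arbitrary in this sketch */

int main(void)
{
	unsigned long flags = 0;	/* new pages start with zeroed flags */

	/* First mapping: the bit was clear, so the page counts as dirty -> flush. */
	int dirty = !test_and_set_bit(PG_dcache_clean, &flags);
	printf("first map:  dirty=%d (flush dcache)\n", dirty);

	/* Later mappings: the bit is already set, so no redundant flush. */
	dirty = !test_and_set_bit(PG_dcache_clean, &flags);
	printf("second map: dirty=%d (skip flush)\n", dirty);
	return 0;
}
```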
Diffstat (limited to 'arch/sh/mm/cache.c')
-rw-r--r-- | arch/sh/mm/cache.c | 14
1 file changed, 7 insertions, 7 deletions
```diff
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index ba401d137bb9..88d3dc3d30d5 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -60,14 +60,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long len)
 {
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
+	    test_bit(PG_dcache_clean, &page->flags)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent(vto);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
+			clear_bit(PG_dcache_clean, &page->flags);
 	}
 
 	if (vma->vm_flags & VM_EXEC)
@@ -79,14 +79,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len)
 {
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
+	    test_bit(PG_dcache_clean, &page->flags)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent(vfrom);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
+			clear_bit(PG_dcache_clean, &page->flags);
 	}
 }
 
@@ -98,7 +98,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	vto = kmap_atomic(to, KM_USER1);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
-	    !test_bit(PG_dcache_dirty, &from->flags)) {
+	    test_bit(PG_dcache_clean, &from->flags)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent(vfrom);
@@ -141,7 +141,7 @@ void __update_cache(struct vm_area_struct *vma,
 
 	page = pfn_to_page(pfn);
 	if (pfn_valid(pfn)) {
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
 		if (dirty)
 			__flush_purge_region(page_address(page), PAGE_SIZE);
@@ -153,7 +153,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 	if (pages_do_alias(addr, vmaddr)) {
 		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-		    !test_bit(PG_dcache_dirty, &page->flags)) {
+		    test_bit(PG_dcache_clean, &page->flags)) {
 			void *kaddr;
 
 			kaddr = kmap_coherent(page, vmaddr);
```
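Every hunk above is the same mechanical inversion: !test_bit(PG_dcache_dirty, ...) becomes test_bit(PG_dcache_clean, ...), set_bit() becomes clear_bit(), and test_and_clear_bit() becomes !test_and_set_bit(). For a page whose flag has already been maintained, the two schemes are equivalent; only the zero-initialized (never-flushed) case behaves differently, which is the point of the patch. A small self-check of that equivalence, using plain C stand-ins rather than the kernel bitops:

```c
#include <assert.h>
#include <stdio.h>

/* Plain C stand-ins for the kernel bitops touched by this patch. */
static int test_bit(int nr, const unsigned long *f)  { return (*f >> nr) & 1; }
static void set_bit(int nr, unsigned long *f)        { *f |= 1UL << nr; }
static void clear_bit(int nr, unsigned long *f)      { *f &= ~(1UL << nr); }
static int test_and_clear_bit(int nr, unsigned long *f)
{
	int old = test_bit(nr, f);
	clear_bit(nr, f);
	return old;
}
static int test_and_set_bit(int nr, unsigned long *f)
{
	int old = test_bit(nr, f);
	set_bit(nr, f);
	return old;
}

int main(void)
{
	/* The same clean page modeled in both schemes, flag at bit 0. */
	unsigned long old_flags = 0;	/* PG_dcache_dirty clear */
	unsigned long new_flags = 1;	/* PG_dcache_clean set   */

	/* The copy_{to,from}_user_page() predicate gives the same answer. */
	assert(!test_bit(0, &old_flags) == test_bit(0, &new_flags));

	/* Marking the page dirty is equivalent in both schemes. */
	set_bit(0, &old_flags);
	clear_bit(0, &new_flags);
	assert(test_bit(0, &old_flags) == !test_bit(0, &new_flags));

	/* The __update_cache() predicates also agree for tracked pages. */
	assert(test_and_clear_bit(0, &old_flags) ==
	       !test_and_set_bit(0, &new_flags));

	puts("old and new polarities agree for already-tracked pages");
	return 0;
}
```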