author     James Bottomley <jejb@mulgrave.il.steeleye.com>   2007-05-30 23:57:05 -0500
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>   2007-05-30 23:57:05 -0500
commit     5bc65793cbf8da0d35f19ef025dda22887e79e80 (patch)
tree       8291998abd73055de6f487fafa174ee2a5d3afee /arch/arm/mm
parent     6edae708bf77e012d855a7e2c7766f211d234f4f (diff)
parent     3f0a6766e0cc5a577805732e5adb50a585c58175 (diff)
[SCSI] Merge up to linux-2.6 head
Conflicts:
drivers/scsi/jazz_esp.c
The same change was made by both the SCSI and SPARC trees: a problem
with UTF-8 conversion in the copyright notice.
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig     | 13
-rw-r--r--  arch/arm/mm/Makefile    |  1
-rw-r--r--  arch/arm/mm/alignment.c |  2
-rw-r--r--  arch/arm/mm/ioremap.c   |  2
-rw-r--r--  arch/arm/mm/mmap.c      |  2
-rw-r--r--  arch/arm/mm/mmu.c       |  2
-rw-r--r--  arch/arm/mm/proc-v7.S   |  2
-rw-r--r--  arch/arm/mm/tlb-v7.S    | 88
8 files changed, 106 insertions, 6 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 15f0284010ca..e7904bc92c73 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -351,6 +351,7 @@ config CPU_V6
 	select CPU_CACHE_V6
 	select CPU_CACHE_VIPT
 	select CPU_CP15_MMU
+	select CPU_HAS_ASID
 	select CPU_COPY_V6 if MMU
 	select CPU_TLB_V6 if MMU
 
@@ -376,8 +377,9 @@ config CPU_V7
 	select CPU_CACHE_V7
 	select CPU_CACHE_VIPT
 	select CPU_CP15_MMU
+	select CPU_HAS_ASID
 	select CPU_COPY_V6 if MMU
-	select CPU_TLB_V6 if MMU
+	select CPU_TLB_V7 if MMU
 
 # Figure out what processor architecture version we should be using.
 # This defines the compiler instruction set which depends on the machine type.
@@ -496,8 +498,17 @@ config CPU_TLB_V4WBI
 config CPU_TLB_V6
 	bool
 
+config CPU_TLB_V7
+	bool
+
 endif
 
+config CPU_HAS_ASID
+	bool
+	help
+	  This indicates whether the CPU has the ASID register; used to
+	  tag TLB and possibly cache entries.
+
 config CPU_CP15
 	bool
 	help
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index b5bd335ff14a..762702765fc3 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_CPU_TLB_V4WT)	+= tlb-v4.o
 obj-$(CONFIG_CPU_TLB_V4WB)	+= tlb-v4wb.o
 obj-$(CONFIG_CPU_TLB_V4WBI)	+= tlb-v4wbi.o
 obj-$(CONFIG_CPU_TLB_V6)	+= tlb-v6.o
+obj-$(CONFIG_CPU_TLB_V7)	+= tlb-v7.o
 
 obj-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o
 obj-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 19ca333240ec..36440c899583 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 1995  Linus Torvalds
  *  Modifications for ARM processor (c) 1995-2001 Russell King
- *  Thumb aligment fault fixups (c) 2004 MontaVista Software, Inc.
+ *  Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
  *   - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
  *  Copyright (C) 1996, Cygnus Software Technologies Ltd.
  *
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d6167ad4e011..f3ade18862aa 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -346,7 +346,7 @@ void __iounmap(volatile void __iomem *addr)
 #ifndef CONFIG_SMP
 	/*
 	 * If this is a section based mapping we need to handle it
-	 * specially as the VM subysystem does not know how to handle
+	 * specially as the VM subsystem does not know how to handle
 	 * such a beast. We need the lock here b/c we need to clear
 	 * all the mappings before the area can be reclaimed
 	 * by someone else.
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2c4c2422cd1e..2728b0e7d2bb 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -5,7 +5,7 @@
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/shm.h>
-
+#include <linux/sched.h>
 #include <asm/system.h>
 
 #define COLOUR_ALIGN(addr,pgoff)		\
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 2ba1530d1ce1..02e050ae59f6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -92,7 +92,7 @@ static struct cachepolicy cache_policies[] __initdata = {
 };
 
 /*
- * These are useful for identifing cache coherency
+ * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
  * writebuffer to be turned off.  (Note: the write
  * buffer should not be on and the cache off).
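For context: the CPU_HAS_ASID option added above marks cores whose MMU tags
each TLB entry with an Address Space ID, so a context switch can install a
fresh ASID instead of flushing the whole TLB. Below is a minimal userspace
sketch of the wrap-around idea only; new_context_asid() and
flush_entire_tlb() are hypothetical names for illustration, not kernel APIs,
and the kernel's real allocator is considerably more involved.

#include <stdio.h>

#define ASID_BITS 8
#define ASID_MASK ((1u << ASID_BITS) - 1)

static unsigned int last_asid;

/* Stand-in for the CP15 "invalidate entire TLB" operation. */
static void flush_entire_tlb(void)
{
	puts("ASID space wrapped: full TLB flush");
}

/*
 * Hand out the next ASID. Because TLB entries are tagged, a context
 * switch normally needs only a new tag; a full flush happens only
 * when the 8-bit ASID space wraps around.
 */
static unsigned int new_context_asid(void)
{
	last_asid = (last_asid + 1) & ASID_MASK;
	if (last_asid == 0) {
		flush_entire_tlb();
		last_asid = 1;		/* keep ASID 0 reserved across wraps */
	}
	return last_asid;
}

int main(void)
{
	for (int i = 0; i < 300; i++)	/* enough iterations to force one wrap */
		new_context_asid();
	return 0;
}

On ARMv6/v7 (pre-LPAE) the hardware ASID field is 8 bits wide, which is why
an occasional wrap, and hence a full flush, is unavoidable.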
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index dd823dd4a374..718f4782ee8b 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -256,7 +256,7 @@ __v7_proc_info:
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
 	.long	cpu_v7_name
 	.long	v7_processor_functions
-	.long	v6wbi_tlb_fns
+	.long	v7wbi_tlb_fns
 	.long	v6_user_fns
 	.long	v7_cache_fns
 	.size	__v7_proc_info, . - __v7_proc_info
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
new file mode 100644
index 000000000000..b56dda8052f7
--- /dev/null
+++ b/arch/arm/mm/tlb-v7.S
@@ -0,0 +1,88 @@
+/*
+ *  linux/arch/arm/mm/tlb-v7.S
+ *
+ *  Copyright (C) 1997-2002 Russell King
+ *  Modified for ARMv7 by Catalin Marinas
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  ARM architecture version 6 TLB handling functions.
+ *  These assume a split I/D TLB.
+ */
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include "proc-macros.S"
+
+/*
+ *	v7wbi_flush_user_tlb_range(start, end, vma)
+ *
+ *	Invalidate a range of TLB entries in the specified address space.
+ *
+ *	- start - start address (may not be aligned)
+ *	- end   - end address (exclusive, may not be aligned)
+ *	- vma   - vma_struct describing address range
+ *
+ *	It is assumed that:
+ *	- the "Invalidate single entry" instruction will invalidate
+ *	  both the I and the D TLBs on Harvard-style TLBs
+ */
+ENTRY(v7wbi_flush_user_tlb_range)
+	vma_vm_mm r3, r2			@ get vma->vm_mm
+	mmid	r3, r3				@ get vm_mm->context.id
+	dsb
+	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
+	mov	r1, r1, lsr #PAGE_SHIFT
+	asid	r3, r3				@ mask ASID
+	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
+	mov	r1, r1, lsl #PAGE_SHIFT
+	vma_vm_flags r2, r2			@ get vma->vm_flags
+1:
+	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
+	tst	r2, #VM_EXEC			@ Executable area ?
+	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
+	add	r0, r0, #PAGE_SZ
+	cmp	r0, r1
+	blo	1b
+	mov	ip, #0
+	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
+	dsb
+	mov	pc, lr
+
+/*
+ *	v7wbi_flush_kern_tlb_range(start,end)
+ *
+ *	Invalidate a range of kernel TLB entries
+ *
+ *	- start - start address (may not be aligned)
+ *	- end   - end address (exclusive, may not be aligned)
+ */
+ENTRY(v7wbi_flush_kern_tlb_range)
+	dsb
+	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
+	mov	r1, r1, lsr #PAGE_SHIFT
+	mov	r0, r0, lsl #PAGE_SHIFT
+	mov	r1, r1, lsl #PAGE_SHIFT
+1:
+	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA
+	mcr	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA
+	add	r0, r0, #PAGE_SZ
+	cmp	r0, r1
+	blo	1b
+	mov	r2, #0
+	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
+	dsb
+	isb
+	mov	pc, lr
+
+	.section ".text.init", #alloc, #execinstr
+
+	.type	v7wbi_tlb_fns, #object
+ENTRY(v7wbi_tlb_fns)
+	.long	v7wbi_flush_user_tlb_range
+	.long	v7wbi_flush_kern_tlb_range
+	.long	v6wbi_tlb_flags
+	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns
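For context: v7wbi_tlb_fns at the end of tlb-v7.S is a table of function
pointers (the kernel's struct cpu_tlb_fns), and the one-word change in
__v7_proc_info is what switches v7 cores from the v6 table to it. Below is a
rough C sketch of that dispatch pattern, with deliberately simplified
signatures; in the real kernel, flush_user_range also takes a
struct vm_area_struct *, and the table is selected at boot from the
matching proc_info record.

#include <stdio.h>

/* Simplified stand-in for the kernel's per-CPU TLB function table. */
struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long start, unsigned long end);
	void (*flush_kern_range)(unsigned long start, unsigned long end);
	unsigned long tlb_flags;
};

static void v7_flush_user_range(unsigned long start, unsigned long end)
{
	printf("invalidate user TLB entries %#lx..%#lx\n", start, end);
}

static void v7_flush_kern_range(unsigned long start, unsigned long end)
{
	printf("invalidate kernel TLB entries %#lx..%#lx\n", start, end);
}

/* Plays the role of v7wbi_tlb_fns: one table per TLB implementation. */
static const struct cpu_tlb_fns v7_tlb = {
	.flush_user_range = v7_flush_user_range,
	.flush_kern_range = v7_flush_kern_range,
	.tlb_flags        = 0,
};

int main(void)
{
	/* Generic code never names the CPU: it calls through the table. */
	v7_tlb.flush_kern_range(0xc0000000UL, 0xc0004000UL);
	return 0;
}

Keeping the per-CPU differences behind one table is what lets a single
kernel image support several TLB implementations side by side.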