From 3c5f7e7b4a0346de670b08f595bd15e7eec91f97 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 31 May 2011 15:38:43 +0100
Subject: ARM: Use TTBR1 instead of reserved context ID

On ARMv7 CPUs that cache first level page table entries (like the
Cortex-A15), using a reserved ASID while changing the TTBR or flushing
the TLB is unsafe.

This is because the CPU may cache the first level entry as the result of
a speculative memory access while the reserved ASID is assigned. After
the process owning the page tables dies, the memory will be reallocated
and may be written with junk values which can be interpreted as global,
valid PTEs by the processor. This will result in the TLB being populated
with bogus global entries.

This patch avoids the use of a reserved context ID in the v7 switch_mm
and ASID rollover code by temporarily using the swapper_pg_dir pointed
at by TTBR1, which contains only global entries that are not tagged
with ASIDs.

Reviewed-by: Frank Rowand
Tested-by: Marc Zyngier
Signed-off-by: Will Deacon
[catalin.marinas@arm.com: add LPAE support]
Signed-off-by: Catalin Marinas
---
 arch/arm/mm/context.c        | 45 ++++++++++++++++++++++++++------------------
 arch/arm/mm/proc-v7-2level.S | 10 ++++------
 2 files changed, 31 insertions(+), 24 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb363d606..aaa291fc072e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -23,25 +23,37 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
 #ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) {                                    \
-        unsigned long ttbl, ttbh;                               \
-        asm volatile(                                           \
-        " mrrc p15, 0, %0, %1, c2 @ read TTBR0\n"               \
-        " mov %1, %2, lsl #(48 - 32) @ set ASID\n"              \
-        " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n"                \
-        : "=&r" (ttbl), "=&r" (ttbh)                            \
-        : "r" (asid & ~ASID_MASK));                             \
+static void cpu_set_reserved_ttbr0(void)
+{
+        unsigned long ttbl = __pa(swapper_pg_dir);
+        unsigned long ttbh = 0;
+
+        /*
+         * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+         * ASID is set to 0.
+         */
+        asm volatile(
+        " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n"
+        :
+        : "r" (ttbl), "r" (ttbh));
+        isb();
 }
 #else
-#define cpu_set_asid(asid) \
-        asm(" mcr p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+static void cpu_set_reserved_ttbr0(void)
+{
+        u32 ttb;
+        /* Copy TTBR1 into TTBR0 */
+        asm volatile(
+        " mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n"
+        " mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n"
+        : "=r" (ttb));
+        isb();
+}
 #endif
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -51,9 +63,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 static void flush_context(void)
 {
-        /* set the reserved ASID before flushing the TLB */
-        cpu_set_asid(0);
-        isb();
+        cpu_set_reserved_ttbr0();
         local_flush_tlb_all();
         if (icache_is_vivt_asid_tagged()) {
                 __flush_icache_all();
@@ -114,8 +124,7 @@ static void reset_context(void *info)
         set_mm_context(mm, asid);
         /* set the new ASID */
-        cpu_set_asid(mm->context.id);
-        isb();
+        cpu_switch_mm(mm->pgd, mm);
 }
 #else
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 3a4b3e7b888c..72270482a922 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -46,18 +46,16 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
         mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
 #endif
-#ifdef CONFIG_ARM_ERRATA_754322
-        dsb
-#endif
-        mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
-        isb
-1:      mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+        mrc p15, 0, r2, c2, c0, 1 @ load TTB 1
+        mcr p15, 0, r2, c2, c0, 0 @ into TTB 0
         isb
 #ifdef CONFIG_ARM_ERRATA_754322
         dsb
 #endif
         mcr p15, 0, r1, c13, c0, 1 @ set context ID
         isb
+        mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+        isb
 #endif
         mov pc, lr
 ENDPROC(cpu_v7_switch_mm)
-- cgit v1.2.3
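A note on the LPAE half of the change above: the removed cpu_set_asid() macro shifted the ASID by (48 - 32) because, with LPAE, the ASID lives in bits [55:48] of the 64-bit TTBR0 and the mcrr instruction takes the value as two 32-bit halves. The stand-alone sketch below (ordinary C, invented helper name, not kernel code) shows the same packing arithmetically.

#include <stdint.h>

/*
 * Illustrative sketch only: pack an ASID into an LPAE TTBR0 value.
 * The translation table base occupies the low bits and the ASID sits
 * in TTBR0[55:48], matching the "lsl #(48 - 32)" in the removed macro.
 */
static uint64_t lpae_ttbr0_with_asid(uint64_t pgd_phys, unsigned int asid)
{
        uint64_t ttbr = pgd_phys & ((1ULL << 48) - 1);  /* table base */

        return ttbr | ((uint64_t)(asid & 0xff) << 48);  /* ASID field */
}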
From 7fec1b57b8a925d83c194f995f83d9f8442fd48e Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Mon, 28 Nov 2011 13:53:28 +0000
Subject: ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on ASID-capable CPUs

Since the ASIDs must be unique to an mm across all the CPUs in a system,
the __new_context() function needs to broadcast a context reset event to
all the CPUs during ASID allocation if a roll-over occurred. Such IPIs
cannot be issued with interrupts disabled and ARM had to define
__ARCH_WANT_INTERRUPTS_ON_CTXSW.

This patch changes the check_context() function to
check_and_switch_context() called from switch_mm(). In case of
ASID-capable CPUs (ARMv6 onwards), if a new ASID is needed and the
interrupts are disabled, it defers the __new_context() and
cpu_switch_mm() calls to the post-lock switch hook where the interrupts
are enabled. Setting the reserved TTBR0 was also moved to
check_and_switch_context() from cpu_v7_switch_mm().

Reviewed-by: Will Deacon
Tested-by: Will Deacon
Reviewed-by: Frank Rowand
Tested-by: Marc Zyngier
Signed-off-by: Catalin Marinas
---
 arch/arm/include/asm/mmu.h         |  2 ++
 arch/arm/include/asm/mmu_context.h | 72 +++++++++++++++++++++++++++++---------
 arch/arm/include/asm/thread_info.h |  1 +
 arch/arm/mm/context.c              |  4 +--
 arch/arm/mm/proc-v7-2level.S       |  3 --
 5 files changed, 61 insertions(+), 21 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b8e580a297e4..20b43d6f23b3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -39,6 +39,8 @@ typedef struct {
  * so enable interrupts over the context switch to avoid high
  * latency.
  */
+#ifndef CONFIG_CPU_HAS_ASID
 #define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+#endif
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c..94e265cb5146 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -49,39 +49,80 @@ DECLARE_PER_CPU(struct mm_struct *, current_mm);
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-        /*
-         * This code is executed with interrupts enabled. Therefore,
-         * mm->context.id cannot be updated to the latest ASID version
-         * on a different CPU (and condition below not triggered)
-         * without first getting an IPI to reset the context. The
-         * alternative is to take a read_lock on mm->context.id_lock
-         * (after changing its type to rwlock_t).
-         */
-        if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-                __new_context(mm);
+        unsigned long flags;
+        __new_context(mm);
+
+        local_irq_save(flags);
+        cpu_switch_mm(mm->pgd, mm);
+        local_irq_restore(flags);
+}
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+                                            struct task_struct *tsk)
+{
         if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
                 __check_kvm_seq(mm);
+
+        /*
+         * Required during context switch to avoid speculative page table
+         * walking with the wrong TTBR.
+         */
+        cpu_set_reserved_ttbr0();
+
+        if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+                /*
+                 * The ASID is from the current generation, just switch to the
+                 * new pgd. This condition is only true for calls from
+                 * context_switch() and interrupts are already disabled.
+                 */
+                cpu_switch_mm(mm->pgd, mm);
+        else if (irqs_disabled())
+                /*
+                 * Defer the new ASID allocation until after the context
+                 * switch critical region since __new_context() cannot be
+                 * called with interrupts disabled (it sends IPIs).
+                 */
+                set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+        else
+                /*
+                 * That is a direct call to switch_mm() or activate_mm() with
+                 * interrupts enabled and a new context.
+                 */
+                switch_new_context(mm);
 }
 #define init_new_context(tsk,mm)        (__init_new_context(tsk,mm),0)
-#else
+#define finish_arch_post_lock_switch \
+        finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+        if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+                switch_new_context(current->mm);
+}
-static inline void check_context(struct mm_struct *mm)
+#else   /* !CONFIG_CPU_HAS_ASID */
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+                                            struct task_struct *tsk)
 {
 #ifdef CONFIG_MMU
         if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
                 __check_kvm_seq(mm);
+        cpu_switch_mm(mm->pgd, mm);
 #endif
 }
 #define init_new_context(tsk,mm)        0
-#endif
+#define finish_arch_post_lock_switch()  do { } while (0)
+
+#endif  /* CONFIG_CPU_HAS_ASID */
 #define destroy_context(mm)             do { } while(0)
@@ -123,8 +164,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
                 *crt_mm = next;
 #endif
-                check_context(next);
-                cpu_switch_mm(next->pgd, next);
+                check_and_switch_context(next, tsk);
                 if (cache_is_vivt())
                         cpumask_clear_cpu(cpu, mm_cpumask(prev));
         }
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d4c24d412a8d..9e13e33ec746 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -146,6 +146,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_MEMDIE              18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK     20
 #define TIF_SECCOMP             21
+#define TIF_SWITCH_MM           22      /* deferred switch_mm */
 #define _TIF_SIGPENDING         (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED       (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index aaa291fc072e..06a2e7ce23c3 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -23,7 +23,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
 #ifdef CONFIG_ARM_LPAE
-static void cpu_set_reserved_ttbr0(void)
+void cpu_set_reserved_ttbr0(void)
 {
         unsigned long ttbl = __pa(swapper_pg_dir);
         unsigned long ttbh = 0;
@@ -39,7 +39,7 @@ static void cpu_set_reserved_ttbr0(void)
         isb();
 }
 #else
-static void cpu_set_reserved_ttbr0(void)
+void cpu_set_reserved_ttbr0(void)
 {
         u32 ttb;
         /* Copy TTBR1 into TTBR0 */
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 72270482a922..42ac069c8012 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -46,9 +46,6 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
         mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
 #endif
-        mrc p15, 0, r2, c2, c0, 1 @ load TTB 1
-        mcr p15, 0, r2, c2, c0, 0 @ into TTB 0
-        isb
 #ifdef CONFIG_ARM_ERRATA_754322
         dsb
 #endif
-- cgit v1.2.3
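The deferral introduced by this patch is easier to see without the kernel plumbing. Below is a stand-alone model of the pattern (plain C with invented names and a fake interrupt flag, not kernel code): when the expensive ASID allocation cannot run inside the interrupts-off critical region, a flag is recorded and the post-lock-switch hook finishes the job once interrupts are enabled again.

#include <stdbool.h>
#include <stdio.h>

/* Stand-alone model of the TIF_SWITCH_MM deferral; all names are illustrative. */
static bool irqs_off;           /* stands in for irqs_disabled()               */
static bool deferred_switch;    /* stands in for the TIF_SWITCH_MM thread flag */

static void allocate_new_asid_and_switch(void)
{
        /* In the kernel this is __new_context() followed by cpu_switch_mm(). */
        printf("allocating new ASID with interrupts enabled\n");
}

static void check_and_switch(bool need_new_asid)
{
        if (!need_new_asid)
                printf("fast path: reuse current-generation ASID\n");
        else if (irqs_off)
                deferred_switch = true;         /* cannot send IPIs here */
        else
                allocate_new_asid_and_switch();
}

static void finish_post_lock_switch(void)
{
        if (deferred_switch) {                  /* runs with interrupts enabled */
                deferred_switch = false;
                allocate_new_asid_and_switch();
        }
}

int main(void)
{
        irqs_off = true;
        check_and_switch(true);         /* defers the work */
        irqs_off = false;
        finish_post_lock_switch();      /* completes the deferred switch */
        return 0;
}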
From e323969ccda2d69f02e047c08b03faa09215c72a Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Mon, 28 Nov 2011 15:59:10 +0000
Subject: ARM: Remove current_mm per-cpu variable

The current_mm variable was used to store the new mm between the
switch_mm() and switch_to() calls where an IPI to reset the context
could have set the wrong mm. Since the interrupts are disabled during
context switch, there is no need for this variable: current->active_mm
already points to the current mm when interrupts are re-enabled.

Reviewed-by: Will Deacon
Tested-by: Will Deacon
Reviewed-by: Frank Rowand
Tested-by: Marc Zyngier
Signed-off-by: Catalin Marinas
---
 arch/arm/include/asm/mmu_context.h |  7 -------
 arch/arm/mm/context.c              | 12 +-----------
 2 files changed, 1 insertion(+), 18 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 94e265cb5146..8da4b9c042fe 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,9 +43,6 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION      (1 << ASID_BITS)
 extern unsigned int cpu_last_asid;
-#ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
@@ -160,10 +157,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 __flush_icache_all();
 #endif
         if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-#ifdef CONFIG_SMP
-                struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
-                *crt_mm = next;
-#endif
                 check_and_switch_context(next, tsk);
                 if (cache_is_vivt())
                         cpumask_clear_cpu(cpu, mm_cpumask(prev));
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 06a2e7ce23c3..806cc4f63516 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -18,9 +18,6 @@
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 #ifdef CONFIG_ARM_LPAE
 void cpu_set_reserved_ttbr0(void)
@@ -108,14 +105,7 @@ static void reset_context(void *info)
 {
         unsigned int asid;
         unsigned int cpu = smp_processor_id();
-        struct mm_struct *mm = per_cpu(current_mm, cpu);
-
-        /*
-         * Check if a current_mm was set on this CPU as it might still
-         * be in the early booting stages and using the reserved ASID.
-         */
-        if (!mm)
-                return;
+        struct mm_struct *mm = current->active_mm;
         smp_rmb();
         asid = cpu_last_asid + cpu + 1;
-- cgit v1.2.3
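Both context.c patches above rely on mm->context.id carrying an 8-bit ASID in its low bits and a generation ("version") count above them. A small stand-alone illustration of the comparison used by check_and_switch_context(), assuming the usual ASID_BITS value of 8 (plain C, not kernel code):

#include <stdio.h>

#define ASID_BITS       8       /* assumption: matches the ARM definition */

/*
 * context_id and last_asid hold generation bits above the ASID bits;
 * XORing them and shifting the ASID away leaves zero only when both
 * values belong to the same generation (no rollover in between).
 */
static int asid_is_current_generation(unsigned int context_id,
                                      unsigned int last_asid)
{
        return !((context_id ^ last_asid) >> ASID_BITS);
}

int main(void)
{
        printf("%d\n", asid_is_current_generation(0x305, 0x3ff)); /* 1: same generation  */
        printf("%d\n", asid_is_current_generation(0x205, 0x3ff)); /* 0: rollover happened */
        return 0;
}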
From f0c4b8d653f5ee091fb8d4d02ed7eaad397491bb Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 20 Apr 2012 17:20:08 +0100
Subject: ARM: 7396/1: errata: only handle ARM erratum #326103 on affected cores

Erratum #326103 ("FSR write bit incorrect on a SWP to read-only memory")
only affects the ARM 1136 core prior to r1p0. The workaround
disassembles the faulting instruction to determine whether it was a read
or write access on all v6 cores.

An issue has been reported on the ARM 11MPCore whereby loading the
faulting instruction may happen in parallel with that page being
unmapped, resulting in a deadlock due to the lack of TLB broadcasting
in hardware:

http://lists.infradead.org/pipermail/linux-arm-kernel/2012-March/091561.html

This patch limits the workaround so that it is only used on affected
cores, which are known to be UP only. Other v6 cores can rely on the FSR
to indicate the access type correctly.

Cc: stable@vger.kernel.org
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/Kconfig        |  9 +++++++++
 arch/arm/mm/abort-ev6.S | 17 +++++++++++------
 2 files changed, 20 insertions(+), 6 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf006d40342c..36586dba6fa6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1186,6 +1186,15 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
+config ARM_ERRATA_326103
+        bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
+        depends on CPU_V6
+        help
+          Executing a SWP instruction to read-only memory does not set bit 11
+          of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
+          treat the access as a read, preventing a COW from occurring and
+          causing the faulting task to livelock.
+
 config ARM_ERRATA_411920
         bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
         depends on CPU_V6 || CPU_V6K
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index ff1f7cc11f87..80741992a9fc 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
         mrc p15, 0, r1, c5, c0, 0 @ get FSR
         mrc p15, 0, r0, c6, c0, 0 @ get FAR
 /*
- * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
- * The test below covers all the write situations, including Java bytecodes
+ * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
 */
-        bic r1, r1, #1 << 11 @ clear bit 11 of FSR
+#ifdef CONFIG_ARM_ERRATA_326103
+        ldr ip, =0x4107b36
+        mrc p15, 0, r3, c0, c0, 0 @ get processor id
+        teq ip, r3, lsr #4 @ r0 ARM1136?
+        bne do_DataAbort
         tst r5, #PSR_J_BIT @ Java?
+        tsteq r5, #PSR_T_BIT @ Thumb?
         bne do_DataAbort
-        do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
-        ldreq r3, [r4] @ read aborted ARM instruction
+        bic r1, r1, #1 << 11 @ clear bit 11 of FSR
+        ldr r3, [r4] @ read aborted ARM instruction
 #ifdef CONFIG_CPU_ENDIAN_BE8
-        reveq r3, r3
+        rev r3, r3
 #endif
         do_ldrd_abort tmp=ip, insn=r3
         tst r3, #1 << 20 @ L = 0 -> write
         orreq r1, r1, #1 << 11 @ yes.
+#endif
         b do_DataAbort
-- cgit v1.2.3
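The ldr ip, =0x4107b36 / teq ip, r3, lsr #4 pair in the patch above encodes the affected-core test: an ARM1136 r0pX part returns a main ID register of the form 0x4107b36x, so discarding the low revision nibble and comparing against 0x4107b36 matches exactly the cores prior to r1p0. The same test in C, as a stand-alone sketch with an invented helper name:

#include <stdint.h>

/* Sketch of the MIDR comparison used in the assembly above. */
static int is_arm1136_before_r1p0(uint32_t midr)
{
        /* Drop the low revision nibble; 0x4107b36 identifies ARM1136 r0pX. */
        return (midr >> 4) == 0x4107b36;
}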
From f154fe9b806574437b47f08e924ad10c0e240b23 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 20 Apr 2012 17:21:08 +0100
Subject: ARM: 7397/1: l2x0: only apply workaround for erratum #753970 on PL310

The workaround for PL310 erratum #753970 can lead to deadlock on systems
with an L220 cache controller. This patch makes the workaround effective
only when the cache controller is identified as a PL310 at probe time.

Cc: stable@vger.kernel.org
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/cache-l2x0.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index a53fd2aaa2f4..a8d02c048a1f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;       /* Bitmask of active ways */
 static u32 l2x0_size;
+static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 struct l2x0_regs l2x0_saved_regs;
@@ -61,12 +62,7 @@ static inline void cache_sync(void)
 {
         void __iomem *base = l2x0_base;
-#ifdef CONFIG_PL310_ERRATA_753970
-        /* write to an unmmapped register */
-        writel_relaxed(0, base + L2X0_DUMMY_REG);
-#else
-        writel_relaxed(0, base + L2X0_CACHE_SYNC);
-#endif
+        writel_relaxed(0, base + sync_reg_offset);
         cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
@@ -331,6 +327,10 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
                 else
                         ways = 8;
                 type = "L310";
+#ifdef CONFIG_PL310_ERRATA_753970
+                /* Unmapped register. */
+                sync_reg_offset = L2X0_DUMMY_REG;
+#endif
                 break;
         case L2X0_CACHE_ID_PART_L210:
                 ways = (aux >> 13) & 0xf;
-- cgit v1.2.3
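The fix above replaces a compile-time #ifdef choice of sync register with a decision taken once at probe time, keyed off the cache controller part number. A stand-alone sketch of that pattern follows; the register offsets and ID masks are illustrative values defined locally for the example, not authoritative definitions.

#include <stdint.h>

#define L2X0_CACHE_ID_PART_MASK         (0xf << 6)      /* illustrative values */
#define L2X0_CACHE_ID_PART_L310         (3 << 6)
#define L2X0_CACHE_SYNC                 0x730           /* example offsets     */
#define L2X0_DUMMY_REG                  0x740

static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/* Probe-time decision: only a PL310 gets the erratum #753970 sync offset. */
static void pick_sync_register(uint32_t cache_id)
{
        if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310)
                sync_reg_offset = L2X0_DUMMY_REG;
}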
From ab4d536890853ab6675ede65db40e2c0980cb0ea Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 20 Apr 2012 17:22:11 +0100
Subject: ARM: 7398/1: l2x0: only write to debug registers on PL310

PL310 errata #588369 and #727915 require writes to the debug registers
of the cache controller to work around known problems. Writing these
registers on L220 may cause deadlock, so ensure that we only perform
this operation when we identify a PL310 at probe time.

Cc: stable@vger.kernel.org
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/cache-l2x0.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index a8d02c048a1f..2a8e380501e8 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -81,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
 }
 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+static inline void debug_writel(unsigned long val)
+{
+        if (outer_cache.set_debug)
+                outer_cache.set_debug(val);
+}
-#define debug_writel(val)       outer_cache.set_debug(val)
-
-static void l2x0_set_debug(unsigned long val)
+static void pl310_set_debug(unsigned long val)
 {
         writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
 }
@@ -94,7 +97,7 @@ static inline void debug_writel(unsigned long val)
 {
 }
-#define l2x0_set_debug  NULL
+#define pl310_set_debug NULL
 #endif
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -331,6 +334,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
                 /* Unmapped register. */
                 sync_reg_offset = L2X0_DUMMY_REG;
 #endif
+                outer_cache.set_debug = pl310_set_debug;
                 break;
         case L2X0_CACHE_ID_PART_L210:
                 ways = (aux >> 13) & 0xf;
@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
         outer_cache.flush_all = l2x0_flush_all;
         outer_cache.inv_all = l2x0_inv_all;
         outer_cache.disable = l2x0_disable;
-        outer_cache.set_debug = l2x0_set_debug;
         printk(KERN_INFO "%s cache controller enabled\n", type);
         printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
-- cgit v1.2.3
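The new debug_writel() above turns an unconditional call through outer_cache.set_debug into a guarded one, so an L220 (whose probe never installs the hook) simply skips the debug-register write. Reduced to its essentials, and with the MMIO write replaced by a printf, the pattern looks like this stand-alone sketch:

#include <stdio.h>

struct outer_cache_fns {
        void (*set_debug)(unsigned long val);
};

static struct outer_cache_fns outer_cache;      /* set_debug stays NULL on an L220 */

static void pl310_set_debug(unsigned long val)
{
        printf("PL310 debug ctrl <- %#lx\n", val);      /* stands in for the MMIO write */
}

static void debug_writel(unsigned long val)
{
        if (outer_cache.set_debug)      /* no-op unless probe installed the hook */
                outer_cache.set_debug(val);
}

int main(void)
{
        debug_writel(0x3);                              /* L220 case: silently skipped */
        outer_cache.set_debug = pl310_set_debug;        /* probe found a PL310 */
        debug_writel(0x3);
        return 0;
}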
From 14904927fcef6bb881fd995b478a0d2e700c1818 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Fri, 27 Apr 2012 01:40:10 +0100
Subject: ARM: 7401/1: mm: Fix section mismatches

WARNING: vmlinux.o(.text+0x111b8): Section mismatch in reference from
the function arm_memory_present() to the function
.init.text:memory_present()
The function arm_memory_present() references the function __init
memory_present(). This is often because arm_memory_present lacks a
__init annotation or the annotation of memory_present is wrong.

WARNING: arch/arm/mm/built-in.o(.text+0x1edc): Section mismatch in
reference from the function alloc_init_pud() to the function
.init.text:alloc_init_section()
The function alloc_init_pud() references the function __init
alloc_init_section(). This is often because alloc_init_pud lacks a
__init annotation or the annotation of alloc_init_section is wrong.

Signed-off-by: Stephen Boyd
Signed-off-by: Russell King
---
 arch/arm/mm/init.c | 4 ++--
 arch/arm/mm/mmu.c  | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 595079fa9d1d..8f5813bbffb5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
 #endif
 #ifndef CONFIG_SPARSEMEM
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
         struct memblock_region *reg;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b86f8933ff91..2c7cf2f9c837 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -618,8 +618,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
         }
 }
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-        unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+        unsigned long end, unsigned long phys, const struct mem_type *type)
 {
         pud_t *pud = pud_offset(pgd, addr);
         unsigned long next;
-- cgit v1.2.3
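Both warnings quoted above have the same shape: a function left in ordinary .text calls one placed in .init.text, and modpost flags the reference because .init.text is discarded once boot completes. A minimal sketch of the rule, with invented function names and a simplified __init definition (the real kernel macro adds further attributes):

/* Minimal sketch; names are invented. __init places a function in
 * .init.text, which the kernel frees once booting has finished. */
#define __init __attribute__((__section__(".init.text")))

static int __init probe_hardware(void)         /* discarded after boot */
{
        return 0;
}

/* Wrong: a plain .text caller may run after .init.text is gone, so
 * modpost reports a section mismatch for this reference. */
static int probe_wrapper_bad(void)
{
        return probe_hardware();
}

/* Right: the caller is itself __init, so the reference never outlives boot. */
static int __init probe_wrapper_good(void)
{
        return probe_hardware();
}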
From 9b61a4d1b2064dbd0c9e61754305ac852170509f Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 16 May 2012 15:19:20 +0100
Subject: ARM: prevent VM_GROWSDOWN mmaps extending below FIRST_USER_ADDRESS

Cc:
Reported-by: Al Viro
Signed-off-by: Russell King
---
 arch/arm/mm/fault.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index f07467533365..5bb48356d217 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -247,7 +247,9 @@ good_area:
         return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 check_stack:
-        if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+        /* Don't allow expansion below FIRST_USER_ADDRESS */
+        if (vma->vm_flags & VM_GROWSDOWN &&
+            addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
                 goto good_area;
 out:
         return fault;
-- cgit v1.2.3
From 1a3abcf41f13666d4ed241c8cc7f48bd38e7b543 Mon Sep 17 00:00:00 2001
From: Vitaly Andrianov
Date: Tue, 15 May 2012 15:01:16 +0100
Subject: ARM: 7418/1: LPAE: fix access flag setup in mem_type_table

A zero value for prot_sect in the memory types table implies that
section mappings should never be created for the memory type in
question. This is checked for in alloc_init_section().

With LPAE, we set a bit to mask access flag faults for kernel mappings.
This breaks the aforementioned (!prot_sect) check in
alloc_init_section().

This patch fixes this bug by first checking for a non-zero prot_sect
before setting the PMD_SECT_AF flag.

Signed-off-by: Vitaly Andrianov
Acked-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/mmu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 2c7cf2f9c837..aa78de8bfdd3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -489,7 +489,8 @@ static void __init build_mem_type_table(void)
         */
        for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                mem_types[i].prot_pte |= PTE_EXT_AF;
-               mem_types[i].prot_sect |= PMD_SECT_AF;
+               if (mem_types[i].prot_sect)
+                       mem_types[i].prot_sect |= PMD_SECT_AF;
        }
        kern_pgprot |= PTE_EXT_AF;
        vecs_pgprot |= PTE_EXT_AF;
-- cgit v1.2.3
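The bug fixed by the last patch is easy to reproduce in miniature: once PMD_SECT_AF has been ORed into an entry whose prot_sect was zero, the "section mappings forbidden" sentinel that alloc_init_section() tests for is gone. A stand-alone demonstration (plain C; the PMD_SECT_AF bit value is illustrative):

#include <stdio.h>

#define PMD_SECT_AF     (1UL << 10)     /* illustrative bit value */

int main(void)
{
        unsigned long prot_sect = 0;    /* 0 means "never map with sections" */

        prot_sect |= PMD_SECT_AF;       /* the old, unconditional OR */
        printf("sentinel lost: prot_sect = %#lx\n", prot_sect);

        prot_sect = 0;
        if (prot_sect)                  /* the fix: leave zero entries alone */
                prot_sect |= PMD_SECT_AF;
        printf("sentinel kept: prot_sect = %#lx\n", prot_sect);
        return 0;
}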