From 75da01e127f7db3b23effa6118336d303e7572a7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 31 Jan 2013 11:25:52 +0000 Subject: ARM: KVM: vgic: force EOIed LRs to the empty state The VGIC doesn't guarantee that an EOIed LR that has been configured to generate a maintenance interrupt will appear as empty. While the code recovers from this situation, it is better to clean the LR and flag it as empty so it can be quickly recycled. Signed-off-by: Marc Zyngier --- arch/arm/kvm/vgic.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index c9a17316e9fe..76ea1aa5e7d2 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c @@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) lr, irq, vgic_cpu->vgic_lr[lr]); BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; - - goto out; + return true; } /* Try to use another LR for this interrupt */ @@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) vgic_cpu->vgic_irq_lr_map[irq] = lr; set_bit(lr, vgic_cpu->lr_used); -out: if (!vgic_irq_is_edge(vcpu, irq)) vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; @@ -1054,6 +1052,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } else { vgic_cpu_irq_clear(vcpu, irq); } + + /* + * Despite being EOIed, the LR may not have + * been marked as empty. + */ + set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); + vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; } } -- cgit v1.2.3 From 33c83cb3c1d84b76c8270abe5487e77f83a81b22 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 1 Feb 2013 18:28:30 +0000 Subject: ARM: KVM: vgic: take distributor lock on sync_hwstate path Now that the maintenance interrupt handling is actually out of the handler itself, the code becomes quite racy as we can get preempted while we process the state. Wrapping this code around the distributor lock ensures that we're not preempted and relatively race-free. Signed-off-by: Marc Zyngier --- arch/arm/kvm/vgic.c | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index 76ea1aa5e7d2..0e4cfe123b38 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c @@ -1016,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); - /* - * We do not need to take the distributor lock here, since the only - * action we perform is clearing the irq_active_bit for an EOIed - * level interrupt. There is a potential race with - * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we - * check if the interrupt is already active. Two possibilities: - * - * - The queuing is occurring on the same vcpu: cannot happen, - * as we're already in the context of this vcpu, and - * executing the handler - * - The interrupt has been migrated to another vcpu, and we - * ignore this interrupt for this run. Big deal. It is still - * pending though, and will get considered when this vcpu - * exits. - */ if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { /* * Some level interrupts have been EOIed. Clear their @@ -1069,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } /* - * Sync back the VGIC state after a guest run. We do not really touch - * the distributor here (the irq_pending_on_cpu bit is safe to set), - * so there is no need for taking its lock. 
+ * Sync back the VGIC state after a guest run. The distributor lock is + * needed so we don't get preempted in the middle of the state processing. */ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { @@ -1117,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + if (!irqchip_in_kernel(vcpu->kvm)) return; + spin_lock(&dist->lock); __kvm_vgic_sync_hwstate(vcpu); + spin_unlock(&dist->lock); } int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 1540c85b176180e5e0b312dd98db7f438baf8a24 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 7 Mar 2013 16:47:23 +0530 Subject: ARC: make allyesconfig build breakages CC drivers/mmc/host/mmc_spi.o drivers/mmc/host/mmc_spi.c:118: error: redefinition of 'struct scratch' make[3]: *** [drivers/mmc/host/mmc_spi.o] Error 1 make[2]: *** [drivers/mmc/host] Error 2 make[1]: *** [drivers/mmc] Error 2 make: *** [drivers] Error 2 CC arch/arc/kernel/kgdb.o In file included from include/linux/kgdb.h:20, from arch/arc/kernel/kgdb.c:11: /home/vineetg/arc/k.org/arc-port/arch/arc/include/asm/kgdb.h:34: warning: 'struct pt_regs' declared inside parameter list /home/vineetg/arc/k.org/arc-port/arch/arc/include/asm/kgdb.h:34: warning: its scope is only this definition or declaration, which is probably not what you want arch/arc/kernel/kgdb.c:172: error: conflicting types for 'kgdb_trap' CC arch/arc/kernel/kgdb.o arch/arc/kernel/kgdb.c: In function 'pt_regs_to_gdb_regs': arch/arc/kernel/kgdb.c:62: error: dereferencing pointer to incomplete type Signed-off-by: Vineet Gupta --- arch/arc/include/asm/kgdb.h | 6 ++---- arch/arc/include/uapi/asm/ptrace.h | 4 ++-- arch/arc/kernel/kgdb.c | 1 + 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h index f3c4934f0ca9..4930957ca3d3 100644 --- a/arch/arc/include/asm/kgdb.h +++ b/arch/arc/include/asm/kgdb.h @@ -13,7 +13,7 @@ #ifdef CONFIG_KGDB -#include +#include /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set * register API yet */ @@ -53,9 +53,7 @@ enum arc700_linux_regnums { }; #else -static inline void kgdb_trap(struct pt_regs *regs, int param) -{ -} +#define kgdb_trap(regs, param) #endif #endif /* __ARC_KGDB_H__ */ diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 6afa4f702075..30333cec0fef 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -28,14 +28,14 @@ */ struct user_regs_struct { - struct scratch { + struct { long pad; long bta, lp_start, lp_end, lp_count; long status32, ret, blink, fp, gp; long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; long sp; } scratch; - struct callee { + struct { long pad; long r25, r24, r23, r22, r21, r20; long r19, r18, r17, r16, r15, r14, r13; diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c index 2888ba5be47e..52bdc83c1495 100644 --- a/arch/arc/kernel/kgdb.c +++ b/arch/arc/kernel/kgdb.c @@ -9,6 +9,7 @@ */ #include +#include #include #include -- cgit v1.2.3 From 8ff14bbc6a2083e83c6d387d025fb67ba639807c Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Wed, 6 Mar 2013 14:33:27 +0530 Subject: ARC: ABIv3: Print the correct ABI ver Signed-off-by: Vineet Gupta --- arch/arc/kernel/setup.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 
dc0f968dae0a..2d95ac07df7b 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) n += scnprintf(buf + n, len - n, "\n"); -#ifdef _ASM_GENERIC_UNISTD_H n += scnprintf(buf + n, len - n, - "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n"); -#endif + "OS ABI [v3]\t: no-legacy-syscalls\n"); return buf; } -- cgit v1.2.3 From 180d406e4948faee6e63781f3e062f40ec7c6fc3 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 4 Mar 2013 16:01:35 +0530 Subject: ARC: ABIv3: fork/vfork wrappers not needed in "no-legacy-syscall" ABI When switching to clone() only ABI - I missed out pruning the low level asm syscall wrappers Signed-off-by: Vineet Gupta --- arch/arc/include/asm/syscalls.h | 2 -- arch/arc/kernel/entry.S | 25 ------------------------- arch/arc/kernel/sys.c | 2 -- 3 files changed, 29 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h index e53a5340ba4f..dd785befe7fd 100644 --- a/arch/arc/include/asm/syscalls.h +++ b/arch/arc/include/asm/syscalls.h @@ -16,8 +16,6 @@ #include int sys_clone_wrapper(int, int, int, int, int); -int sys_fork_wrapper(void); -int sys_vfork_wrapper(void); int sys_cacheflush(uint32_t, uint32_t uint32_t); int sys_arc_settls(void *); int sys_arc_gettls(void); diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index ef6800ba2f03..b9d875a441cc 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork ;################### Special Sys Call Wrappers ########################## -; TBD: call do_fork directly from here -ARC_ENTRY sys_fork_wrapper - SAVE_CALLEE_SAVED_USER - bl @sys_fork - DISCARD_CALLEE_SAVED_USER - - GET_CURR_THR_INFO_FLAGS r10 - btst r10, TIF_SYSCALL_TRACE - bnz tracesys_exit - - b ret_from_system_call -ARC_EXIT sys_fork_wrapper - -ARC_ENTRY sys_vfork_wrapper - SAVE_CALLEE_SAVED_USER - bl @sys_vfork - DISCARD_CALLEE_SAVED_USER - - GET_CURR_THR_INFO_FLAGS r10 - btst r10, TIF_SYSCALL_TRACE - bnz tracesys_exit - - b ret_from_system_call -ARC_EXIT sys_vfork_wrapper - ARC_ENTRY sys_clone_wrapper SAVE_CALLEE_SAVED_USER bl @sys_clone diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c index f6bdd07583f3..9d6c1ca26af6 100644 --- a/arch/arc/kernel/sys.c +++ b/arch/arc/kernel/sys.c @@ -6,8 +6,6 @@ #include #define sys_clone sys_clone_wrapper -#define sys_fork sys_fork_wrapper -#define sys_vfork sys_vfork_wrapper #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), -- cgit v1.2.3 From 63981a4096081f3a35351f56fa89e91f493c02c7 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 22 Jan 2013 10:58:21 +0100 Subject: MIPS: compat: Return same error ENOSYS as native for invalid operation. The pains for multiplexed syscalls. Noticed by Al Viro . 
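As a minimal illustration of the point (an editorial sketch, not code from this patch; the function and helper names are made up): a multiplexed syscall dispatches on a sub-operation number, and an unknown sub-operation should look exactly like an unimplemented syscall does natively, i.e. return -ENOSYS rather than -EINVAL, which would instead suggest the operation exists but was passed bad arguments.

	#include <errno.h>

	/* Hypothetical dispatcher sketch; names are illustrative only. */
	static long handle_known_op(unsigned long arg)
	{
		return (long)arg;		/* stand-in for a real sub-call */
	}

	long ipc_demux(unsigned int call, unsigned long arg)
	{
		switch (call) {
		case 1:				/* a known sub-operation */
			return handle_known_op(arg);
		default:			/* not implemented here */
			return -ENOSYS;		/* was -EINVAL before this fix */
		}
	}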
Signed-off-by: Ralf Baechle --- arch/mips/kernel/linux32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 8eeee1c860c0..db9655f08892 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third, err = compat_sys_shmctl(first, second, compat_ptr(ptr)); break; default: - err = -EINVAL; + err = -ENOSYS; break; } -- cgit v1.2.3 From e744109fce4b9581acdc23287667e240bcd239b7 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Sun, 3 Mar 2013 11:39:35 +0000 Subject: MIPS: Use CONFIG_CPU_MIPSR2 in csum_partial.S The csum_partial implementation contain optimalizations for the MIPS R2 instruction set. This optimization is never enabled however because the if directive uses the CPU_MIPSR2 constant which is not defined anywhere. Use the CONFIG_CPU_MIPSR2 constant instead. Signed-off-by: Gabor Juhos Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/4971/ Signed-off-by: Ralf Baechle --- arch/mips/lib/csum_partial.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 507147aebd41..a6adffbb4e5f 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -270,7 +270,7 @@ LEAF(csum_partial) #endif /* odd buffer alignment? */ -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, t7 #else @@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc) addu sum, v1 #endif -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, odd #else -- cgit v1.2.3 From 0c81157b46c533139d6be721d41617020c59a2c3 Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 26 Feb 2013 14:35:23 -0800 Subject: MIPS: Fix logic errors in bitops.c commit 92d11594f6 (MIPS: Remove irqflags.h dependency from bitops.h) factored some of the bitops code out into a separate file (arch/mips/lib/bitops.c). Unfortunately the logic converting a bit mask into a boolean result was lost in some of the functions. We had: int res; unsigned long shifted_result_bit; . . . res = shifted_result_bit; return res; Which truncates off the high 32 bits (thus yielding an incorrect value) on 64-bit systems. The manifestation of this is that a non-SMP 64-bit kernel will not boot as the bitmap operations in bootmem.c are all screwed up. 
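To see the truncation concretely, here is a small stand-alone sketch (not part of the patch) assuming a 64-bit unsigned long: narrowing a mask-and result to int discards bits 32-63, so a set bit above bit 31 reads back as zero unless the result is first reduced to a boolean, which is what the "!= 0" in the fix does.

	#include <stdio.h>

	int main(void)
	{
		unsigned long word = 1UL << 40;		/* a bit above bit 31 is set */
		unsigned long mask = 1UL << 40;

		int truncated = (mask & word);		/* high bits lost on 64-bit: 0 */
		int boolean   = (mask & word) != 0;	/* what the fixed code computes: 1 */

		printf("truncated=%d boolean=%d\n", truncated, boolean);
		return 0;
	}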
Signed-off-by: David Daney Cc: linux-mips@linux-mips.org Cc: Jim Quinlan Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/4965/ Signed-off-by: Ralf Baechle --- arch/mips/lib/bitops.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c index 81f1dcfdcab8..a64daee740ee 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c @@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a &= ~mask; raw_local_irq_restore(flags); return res; @@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a ^= mask; raw_local_irq_restore(flags); return res; -- cgit v1.2.3 From 9f91e5064c6c24d6ccd760a6b6120de4560d9829 Mon Sep 17 00:00:00 2001 From: Yoichi Yuasa Date: Thu, 21 Feb 2013 15:38:19 +0900 Subject: MIPS: VR4133: Fix probe for LL/SC. 
Signed-off-by: Yoichi Yuasa Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/4963/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/cpu-probe.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6bfccc227a95..d069a19112e8 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->tlbsize = 48; break; case PRID_IMP_VR41XX: + set_isa(c, MIPS_CPU_ISA_III); + c->options = R4K_OPTS; + c->tlbsize = 32; switch (c->processor_id & 0xf0) { case PRID_REV_VR4111: c->cputype = CPU_VR4111; @@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC VR4131"; } else { c->cputype = CPU_VR4133; + c->options |= MIPS_CPU_LLSC; __cpu_name[cpu] = "NEC VR4133"; } break; @@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC Vr41xx"; break; } - set_isa(c, MIPS_CPU_ISA_III); - c->options = R4K_OPTS; - c->tlbsize = 32; break; case PRID_IMP_R4300: c->cputype = CPU_R4300; -- cgit v1.2.3 From 383c97b4587665d47d08dff37ec85b44be760505 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 4 Mar 2013 04:17:21 +0000 Subject: MIPS: Add dependencies for HAVE_ARCH_TRANSPARENT_HUGEPAGE The MIPS implementation of transparent huge-pages (THP) is 64-bit only, and of course also requires that the CPU supports huge-pages. Currently it's entirely possible to enable THP in other configurations, which then fail to build due to pfn_pmd() not being defined. Signed-off-by: Ben Hutchings Cc: linux-mips@linux-mips.org Acked-by: David Daney Patchwork: https://patchwork.linux-mips.org/patch/4972/ Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index ae9c716c46bb..e8e10b794b87 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -18,7 +18,7 @@ config MIPS select HAVE_KRETPROBES select HAVE_DEBUG_KMEMLEAK select ARCH_BINFMT_ELF_RANDOMIZE_PIE - select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT select RTC_LIB if !MACH_LOONGSON select GENERIC_ATOMIC64 if !64BIT select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE -- cgit v1.2.3 From 631b0af98c1efb160f02154743ae9f13fe03e347 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Tue, 5 Mar 2013 13:20:20 +0100 Subject: MIPS: Get rid of CONFIG_CPU_HAS_LLSC again Commit f7ade3c168e4f437c11f57be012992bbb0e3075c ("MIPS: Get rid of CONFIG_CPU_HAS_LLSC") did what it promised to do. But since then that macro and its Kconfig symbol popped up again. Get rid of those again. 
Signed-off-by: Paul Bolle Cc: Jonas Gorski Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/4978/ Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 1 - arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h | 4 ---- 2 files changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e8e10b794b87..1a166d98773e 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1493,7 +1493,6 @@ config CPU_XLP select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM - select CPU_HAS_LLSC select WEAK_ORDERING select WEAK_REORDERING_BEYOND_LLSC select CPU_HAS_PREFETCH diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index d9c828419037..b40f37fb3dee 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h @@ -28,11 +28,7 @@ /* #define cpu_has_prefetch ? */ #define cpu_has_mcheck 1 /* #define cpu_has_ejtag ? */ -#ifdef CONFIG_CPU_HAS_LLSC -#define cpu_has_llsc 1 -#else #define cpu_has_llsc 0 -#endif /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ -- cgit v1.2.3 From f4cdb6a00c148e7724ada0998643b293a52b5f62 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 12 Mar 2013 16:06:07 +0100 Subject: MIPS: SEAD3: Enable LL/SC. All synthesizable CPU cores that could be loaded into a SEAD3's FPGA are MIPS32 or MIPS64 CPUs that have ll/sc. Signed-off-by: Ralf Baechle --- arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index b40f37fb3dee..193c0912d38e 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h @@ -28,7 +28,7 @@ /* #define cpu_has_prefetch ? */ #define cpu_has_mcheck 1 /* #define cpu_has_ejtag ? */ -#define cpu_has_llsc 0 +#define cpu_has_llsc 1 /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ -- cgit v1.2.3 From 13872ebb915b547b3c0a1fc04f549a1475bb7989 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Wed, 13 Mar 2013 04:10:57 -0600 Subject: ARM: OMAP1: fix USB host on 1710 There is a long-standing bug that OHCI USB host controller does not respond on 1710, because of wrong clock definitions. See e.g. http://marc.info/?l=linux-omap&m=119634441229321&w=2. All register reads return just zeroes: [ 1.896606] ohci ohci: OMAP OHCI [ 1.912597] ohci ohci: new USB bus registered, assigned bus number 1 [ 1.933776] ohci ohci: irq 38, io mem 0xfffba000 [ 2.012573] ohci ohci: init err (00000000 0000) [ 2.030334] ohci ohci: can't start [ 2.046661] ohci ohci: startup error -75 [ 2.063201] ohci ohci: USB bus 1 deregistered After some experiments, it seems that when changing the usb_dc_ck / SOFT_REQ enable bit from USB_REQ_EN_SHIFT to SOFT_USB_OTG_DPLL_REQ_SHIFT (like done also on 7XX), the USB appears to work: [ 2.183959] ohci ohci: OMAP OHCI [ 2.198242] ohci ohci: new USB bus registered, assigned bus number 1 [ 2.215820] ohci ohci: irq 38, io mem 0xfffba000 [ 2.324798] hub 1-0:1.0: USB hub found [ 2.361267] hub 1-0:1.0: 3 ports detected The patch is tested on Nokia 770. 
Signed-off-by: Aaro Koskinen Signed-off-by: Paul Walmsley --- arch/arm/mach-omap1/clock_data.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index cb7c6ae2e3fc..6c4f766365a2 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c @@ -538,15 +538,6 @@ static struct clk usb_hhc_ck16xx = { }; static struct clk usb_dc_ck = { - .name = "usb_dc_ck", - .ops = &clkops_generic, - /* Direct from ULPD, no parent */ - .rate = 48000000, - .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), - .enable_bit = USB_REQ_EN_SHIFT, -}; - -static struct clk usb_dc_ck7xx = { .name = "usb_dc_ck", .ops = &clkops_generic, /* Direct from ULPD, no parent */ @@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = { CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), - CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX), - CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX), + CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX), CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), CLK(NULL, "mclk", &mclk_16xx, CK_16XX), CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), -- cgit v1.2.3 From 71b37071f02e20345dcc0d570c69896da795e1e6 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Wed, 13 Mar 2013 04:11:23 -0600 Subject: ARM: OMAP4: clock data: lock USB DPLL on boot Some versions of the u-boot bootloader do not lock the USB DPLL and when the USB DPLL is not locked, then it is observed that the L3INIT power domain does not transition to retention state during kernel suspend on OMAP4 devices. Fix this by locking the USB DPLL at 960 MHz on kernel boot. Signed-off-by: Jon Hunter Signed-off-by: Paul Walmsley --- arch/arm/mach-omap2/cclock44xx_data.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index 3d58f335f173..015bcdc34b4a 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c @@ -52,6 +52,13 @@ */ #define OMAP4_DPLL_ABE_DEFFREQ 98304000 +/* + * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section + * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred + * locked frequency for the USB DPLL is 960MHz. + */ +#define OMAP4_DPLL_USB_DEFFREQ 960000000 + /* Root clocks */ DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); @@ -1705,5 +1712,13 @@ int __init omap4xxx_clk_init(void) if (rc) pr_err("%s: failed to configure ABE DPLL!\n", __func__); + /* + * Lock USB DPLL on OMAP4 devices so that the L3INIT power + * domain can transition to retention state when not in use. + */ + rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ); + if (rc) + pr_err("%s: failed to configure USB DPLL!\n", __func__); + return 0; } -- cgit v1.2.3 From 092bc089c249de0fa0f0c98b28dea6e5f1367b6e Mon Sep 17 00:00:00 2001 From: Grazvydas Ignotas Date: Mon, 11 Mar 2013 21:49:00 +0200 Subject: ARM: OMAP3: hwmod data: keep MIDLEMODE in force-standby for musb For some unknown reason, allowing hwmod to control MIDLEMODE causes core_pwrdm to not hit idle states for musb in DM3730 at least. 
I've verified that setting any MIDLEMODE value other than "force standby" before enabling the device causes subsequent suspend attempts to fail with core_pwrdm not entering idle states, even if the driver is unloaded and "force standby" is restored before suspend attempt. To recover from this, soft reset can be used, but that's not suitable solution for suspend. Keeping the register set at force standby (reset value) makes it work and device still functions properly, as musb has driver-controlled OTG_FORCESTDBY register that controls MSTANDBY signal. Note that TI PSP kernels also have similar workarounds. This patch also fixes HWMOD_SWSUP_MSTANDBY documentation to match the actual flag name. Signed-off-by: Grazvydas Ignotas Signed-off-by: Paul Walmsley --- arch/arm/mach-omap2/omap_hwmod.c | 7 +++++-- arch/arm/mach-omap2/omap_hwmod.h | 9 +++++++-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 7 ++++++- 3 files changed, 18 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index c2c798c08c2b..a202a4785104 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh) } if (sf & SYSC_HAS_MIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { + if (oh->flags & HWMOD_FORCE_MSTANDBY) { + idlemode = HWMOD_IDLEMODE_FORCE; + } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) { idlemode = HWMOD_IDLEMODE_NO; } else { if (sf & SYSC_HAS_ENAWAKEUP) @@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh) } if (sf & SYSC_HAS_MIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { + if ((oh->flags & HWMOD_SWSUP_MSTANDBY) || + (oh->flags & HWMOD_FORCE_MSTANDBY)) { idlemode = HWMOD_IDLEMODE_FORCE; } else { if (sf & SYSC_HAS_ENAWAKEUP) diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index d43d9b608eda..d5dc935f6060 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm { * * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out * of idle, rather than relying on module smart-idle - * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out - * of standby, rather than relying on module smart-standby + * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and + * out of standby, rather than relying on module smart-standby * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for * SDRAM controller, etc. XXX probably belongs outside the main hwmod file * XXX Should be HWMOD_SETUP_NO_RESET @@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm { * correctly, or this is being abused to deal with some PM latency * issues -- but we're currently suffering from a shortage of * folks who are able to track these issues down properly. + * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device + * is kept in force-standby mode. Failing to do so causes PM problems + * with musb on OMAP3630 at least. Note that musb has a dedicated register + * to control MSTANDBY signal when MIDLEMODE is set to force-standby. 
*/ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_16BIT_REG (1 << 8) #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) #define HWMOD_BLOCK_WFI (1 << 10) +#define HWMOD_FORCE_MSTANDBY (1 << 11) /* * omap_hwmod._int_flags definitions diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index ac7e03ec952f..5112d04e7b79 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = { * Erratum ID: i479 idle_req / idle_ack mechanism potentially * broken when autoidle is enabled * workaround is to disable the autoidle bit at module level. + * + * Enabling the device in any other MIDLEMODE setting but force-idle + * causes core_pwrdm not enter idle states at least on OMAP3630. + * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY + * signal when MIDLEMODE is set to force-idle. */ .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE - | HWMOD_SWSUP_MSTANDBY, + | HWMOD_FORCE_MSTANDBY, }; /* usb_otg_hs */ -- cgit v1.2.3 From 92702df3570e1ccfa050e135e50c450502251b79 Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Tue, 12 Feb 2013 20:28:12 +0000 Subject: ARM: OMAP4: PM: fix PM regression introduced by recent clock cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 17b7e7d33530e2bbd3bdc90f4db09b91cfdde2bb ("ARM: OMAP4: clock/hwmod data: start to remove some IP block control "clocks"") introduced a regression preventing the L3INIT clockdomain of OMAP4 systems from entering idle. This in turn prevented these systems from entering full chip clock-stop. The regression was caused by the incorrect removal of a so-called "optional functional clock" from the OMAP4 clock data. This wasn't caught for two reasons. First, I missed the retention entry failure in the branch test logs: http://www.pwsan.com/omap/testlogs/cleanup_a_3.9/20130126014242/pm/4460pandaes/4460pandaes_log.txt Second, the integration data for the OCP2SCP PHY IP block, added by commit 0c6688753f9912c6a7013549ec31c8844020bbc1 ("ARM: OMAP4: hwmod data: add remaining USB-related IP blocks"), should have associated this clock with the IP block, but did not. Fix by adding back the so-called "optional" functional clock to the clock data, and by linking that clock to the OCP2SCP PHY IP block integration hwmod data. The problem patch was discovered by J, Keerthy . 
Cc: Keerthy Cc: Benoît Cousson Signed-off-by: Paul Walmsley --- arch/arm/mach-omap2/cclock44xx_data.c | 5 +++++ arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 6 ++++++ 2 files changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index 015bcdc34b4a..0c6834ae1fc4 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c @@ -1018,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel, OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); +DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0, + OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, + OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL); + DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); @@ -1545,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = { CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), + CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X), CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 0e47d2e1687c..9e0576569e07 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2714,6 +2714,10 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = { { } }; +static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = { + { .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" }, +}; + /* ocp2scp_usb_phy */ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { .name = "ocp2scp_usb_phy", @@ -2728,6 +2732,8 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { }, }, .dev_attr = ocp2scp_dev_attr, + .opt_clks = ocp2scp_usb_phy_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(ocp2scp_usb_phy_opt_clks), }; /* -- cgit v1.2.3 From a4285b99e0087361c61f51c819633382fa659ea6 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 13 Mar 2013 20:54:34 +0100 Subject: MIPS: Fix inconsistent formatting inside /proc/cpuinfo There is a missing " " inside /proc/cpuinfo. The bad commit was: commit a96102be700f87283f168942cd09a2b30f86f324 Author: Steven J. Hill Date: Fri Dec 7 04:31:36 2012 +0000 MIPS: Add printing of ISA version in cpuinfo. 
Signed-off-by: John Crispin Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/4988/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 135c4aadccbe..7a54f74b7818 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_mips_r) { seq_printf(m, "isa\t\t\t:"); if (cpu_has_mips_1) - seq_printf(m, "%s", "mips1"); + seq_printf(m, "%s", " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) -- cgit v1.2.3 From 605c357bb40ce412c086f3a928d07c1d4349b95b Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Fri, 15 Mar 2013 19:16:52 -0500 Subject: ARM: ep93xx: Fix wait for UART FIFO to be empty Commit 210dce5f "ARM: ep93xx: properly wait for UART FIFO to be empty" Removed the timeout loop while waiting for the uart transmit fifo to empty. Some bootloaders leave the uart in a state where there might be bytes in the uart that are not transmitted when execution is handed over to the kernel. This results in a deadlocked system while waiting for the fifo to empty. Add back the timeout wait to prevent the deadlock. Increase the wait time to hopefully prevent the decompressor corruption that lead to commit 210dce5f. This corruption was probably due to a slow uart baudrate. The 10* increase in the wait time should be enough for all cases. Signed-off-by: H Hartley Sweeten Acked-by: Florian Fainelli Signed-off-by: Ryan Mallon --- arch/arm/mach-ep93xx/include/mach/uncompress.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h index d2afb4dd82ab..b5cc77d2380b 100644 --- a/arch/arm/mach-ep93xx/include/mach/uncompress.h +++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h @@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr) static inline void putc(int c) { - /* Transmit fifo not full? */ - while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF) - ; + int i; + + for (i = 0; i < 10000; i++) { + /* Transmit fifo not full? */ + if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)) + break; + } __raw_writeb(c, PHYS_UART_DATA); } -- cgit v1.2.3 From a37b2dc52b88ccd926099d852eae1bb324bc92eb Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Sat, 2 Mar 2013 12:31:39 +0530 Subject: ARC: Remove SET_PERSONALITY (tracks cross-arch change) Tracks commit e72837e3e7b "default SET_PERSONALITY() in linux/elf.h" Signed-off-by: Vineet Gupta --- arch/arc/include/asm/elf.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h index f4c8d36ebecb..a26282857683 100644 --- a/arch/arc/include/asm/elf.h +++ b/arch/arc/include/asm/elf.h @@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *); */ #define ELF_PLATFORM (NULL) -#define SET_PERSONALITY(ex) \ - set_personality(PER_LINUX | (current->personality & (~PER_MASK))) - #endif -- cgit v1.2.3 From 66db3feb486c01349f767b98ebb10b0c3d2d021b Mon Sep 17 00:00:00 2001 From: CQ Tang Date: Mon, 18 Mar 2013 11:02:21 -0400 Subject: x86-64: Fix the failure case in copy_user_handle_tail() The increment of "to" in copy_user_handle_tail() will have incremented before a failure has been noted. This causes us to skip a byte in the failure case. 
Only do the increment when assured there is no failure. Signed-off-by: CQ Tang Link: http://lkml.kernel.org/r/20130318150221.8439.993.stgit@phlsvslse11.ph.intel.com Signed-off-by: Mike Marciniszyn Signed-off-by: H. Peter Anvin Cc: --- arch/x86/lib/usercopy_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 05928aae911e..906fea315791 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) char c; unsigned zero_len; - for (; len; --len) { + for (; len; --len, to++) { if (__get_user_nocheck(c, from++, sizeof(char))) break; - if (__put_user_nocheck(c, to++, sizeof(char))) + if (__put_user_nocheck(c, to, sizeof(char))) break; } -- cgit v1.2.3 From 6a15075eced2d780fa6c5d83682410f47f2e800b Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Mon, 18 Mar 2013 19:24:02 +0100 Subject: ARM: video: mxs: Fix mxsfb misconfiguring VDCTRL0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The issue fixed by this patch manifests only then using X11 with mxsfb driver. The X11 will display either shifted image or otherwise distorted image on the LCD. The problem is that the X11 tries to reconfigure the framebuffer and along the way calls fb_ops.fb_set_par() with X11's desired configuration values. The field of particular interest is fb_info->var.sync which contains non-standard values if configured by kernel. These are either FB_SYNC_DATA_ENABLE_HIGH_ACT, FB_SYNC_DOTCLK_FAILING_ACT or both, depending on the platform configuration. Both of these values are defined in the include/linux/mxsfb.h file. The driver interprets these values and configures the LCD controller accordingly. Yet X11 only has access to the standard values for this field defined in include/uapi/linux/fb.h and thus, unlike kernel, omits these special values. This results in distorted image on the LCD. This patch moves these non-standard values into new field of the mxsfb_platform_data structure so the driver can in turn check this field instead of the video mode field for these specific portions. Moreover, this patch prefixes these values with MXSFB_SYNC_ prefix instead of FB_SYNC_ prefix to prevent confusion of subsequent users. 
Signed-off-by: Marek Vasut Cc: Fabio Estevam Cc: Linux ARM Cc: Linux FBDEV Cc: Lothar Waßmann Cc: Sascha Hauer Tested-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/mach-mxs/mach-mxs.c | 24 ++++++++++++------------ drivers/video/mxsfb.c | 7 +++++-- include/linux/mxsfb.h | 7 +++++-- 3 files changed, 22 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 3218f1f2c0e0..e7b781d3788f 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = { .lower_margin = 4, .hsync_len = 1, .vsync_len = 1, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, }, }; @@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = { .lower_margin = 10, .hsync_len = 10, .vsync_len = 10, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, }, }; @@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = { .lower_margin = 45, .hsync_len = 1, .vsync_len = 1, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT, }, }; @@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = { .lower_margin = 13, .hsync_len = 48, .vsync_len = 3, - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | - FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, + .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; @@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = { .lower_margin = 0x15, .hsync_len = 64, .vsync_len = 4, - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | - FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, + .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; @@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = { .lower_margin = 2, .hsync_len = 15, .vsync_len = 15, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT }, }; @@ -259,6 +249,8 @@ static void __init imx23_evk_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes); mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; } static inline void enable_clk_enet_out(void) @@ -278,6 +270,8 @@ static void __init imx28_evk_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes); mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); } @@ -297,6 +291,7 @@ static void __init m28evk_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes); mxsfb_pdata.default_bpp = 16; mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; } static void __init sc_sps1_init(void) @@ -322,6 +317,8 @@ static void __init apx4devkit_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes); mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; } #define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0) @@ -407,6 +404,7 @@ static void __init cfa10049_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; } static void __init cfa10037_init(void) @@ -423,6 +421,8 @@ static void __init apf28_init(void) mxsfb_pdata.mode_count = 
ARRAY_SIZE(apf28dev_video_modes); mxsfb_pdata.default_bpp = 16; mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; } static void __init mxs_machine_init(void) diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 755556ca5b2d..45169cbaba6e 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -169,6 +169,7 @@ struct mxsfb_info { unsigned dotclk_delay; const struct mxsfb_devdata *devdata; int mapped; + u32 sync; }; #define mxsfb_is_v3(host) (host->devdata->ipversion == 3) @@ -456,9 +457,9 @@ static int mxsfb_set_par(struct fb_info *fb_info) vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT) vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; - if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT) + if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT) vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; - if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT) + if (host->sync & MXSFB_SYNC_DOTCLK_FAILING_ACT) vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING; writel(vdctrl0, host->base + LCDC_VDCTRL0); @@ -861,6 +862,8 @@ static int mxsfb_probe(struct platform_device *pdev) INIT_LIST_HEAD(&fb_info->modelist); + host->sync = pdata->sync; + ret = mxsfb_init_fbinfo(host); if (ret != 0) goto error_init_fb; diff --git a/include/linux/mxsfb.h b/include/linux/mxsfb.h index f14943d55315..f80af8674342 100644 --- a/include/linux/mxsfb.h +++ b/include/linux/mxsfb.h @@ -24,8 +24,8 @@ #define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */ #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */ -#define FB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6) -#define FB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* failing/negtive edge sampling */ +#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6) +#define MXSFB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* failing/negtive edge sampling */ struct mxsfb_platform_data { struct fb_videomode *mode_list; @@ -44,6 +44,9 @@ struct mxsfb_platform_data { * allocated. If specified,fb_size must also be specified. * fb_phys must be unused by Linux. */ + u32 sync; /* sync mask, contains MXSFB specifics not + * carried in fb_info->var.sync + */ }; #endif /* __LINUX_MXSFB_H */ -- cgit v1.2.3 From 287939a3690c8da6fd3310d7593ff0448cb9447c Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Wed, 13 Mar 2013 10:52:49 +0800 Subject: ARM: imx: add dependency check for DEBUG_IMX_UART_PORT While adding i.MX DEBUG_LL selection, commit f8c95fe (ARM: imx: support DEBUG_LL uart port selection for all i.MX SoCs) leaves Kconfig symbol DEBUG_IMX_UART_PORT there without any dependency check. This results in that everyone gets the symbol in their config, which is someting undesirable. Add "depends on ARCH_MXC" for the symbol to prevent that. Reported-by: Karl Beldan Signed-off-by: Shawn Guo --- arch/arm/Kconfig.debug | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index ecfcdba2d17c..9b31f4311ea2 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT DEBUG_IMX53_UART || \ DEBUG_IMX6Q_UART default 1 + depends on ARCH_MXC help Choose UART port on which kernel low-level debug messages should be output. 
-- cgit v1.2.3 From 2105fd550ca7dbdd490934f487852c2a399b20cf Mon Sep 17 00:00:00 2001 From: Pierrick Hascoet Date: Mon, 18 Mar 2013 17:04:45 +0100 Subject: arc: fix dma_address assignment during dma_map_sg() Signed-off-by: Pierrick Hascoet Signed-off-by: Vineet Gupta --- arch/arc/include/asm/dma-mapping.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index 31f77aec0823..45b8e0cea176 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h @@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int i; for_each_sg(sg, s, nents, i) - sg->dma_address = dma_map_page(dev, sg_page(s), s->offset, + s->dma_address = dma_map_page(dev, sg_page(s), s->offset, s->length, dir); return nents; -- cgit v1.2.3 From 63c2b6812f1dc0beda4d6adad0365e048aa693e2 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 18 Mar 2013 15:56:10 +0000 Subject: MIPS: Fix code generation for non-DSP capable CPUs Commit 32a7ede (MIPS: dsp: Add assembler support for DSP ASEs) has enabled the use of DSP ASE specific instructions such as rddsp and wrdsp under the idea that all code path that will make use of these two instructions are properly checking for cpu_has_dsp to ensure that the particular CPU we are running on *actually* supports DSP ASE. This commit actually causes the following oops on QEMU Malta emulating a MIPS 24Kc without the DSP ASE implemented: [ 7.960000] Reserved instruction in kernel [ 7.960000] Cpu 0 [ 7.960000] $ 0 : 00000000 00000000 00000014 00000005 [ 7.960000] $ 4 : 8fc2de48 00000001 00000000 8f59ddb0 [ 7.960000] $ 8 : 8f5ceec4 00000018 00000c00 00800000 [ 7.960000] $12 : 00000100 00000200 00000000 00457b84 [ 7.960000] $16 : 00000000 8fc2ba78 8f4ec980 00000001 [ 7.960000] $20 : 80418f90 00000000 00000000 000002dd [ 7.960000] $24 : 0000009c 7730d7b8 [ 7.960000] $28 : 8f59c000 8f59dd38 00000001 80104248 [ 7.960000] Hi : 0000001d [ 7.960000] Lo : 0000000b [ 7.960000] epc : 801041ec thread_saved_pc+0x2c/0x38 [ 7.960000] Not tainted [ 7.960000] ra : 80104248 get_wchan+0x48/0xac [ 7.960000] Status: 1000b703 KERNEL EXL IE [ 7.960000] Cause : 10800028 [ 7.960000] PrId : 00019300 (MIPS 24Kc) [ 7.960000] Modules linked in: [ 7.960000] Process killall (pid: 1574, threadinfo=8f59c000, task=8fd14558, tls=773aa440) [ 7.960000] Stack : 8fc2ba78 8012b008 0000000c 0000001d 00000000 00000000 8f58a380 8f58a380 8fc2ba78 80202668 8f59de78 8f468600 8f59de28 801b2a3c 8f59df00 8f98ba20 74696e69 8f468600 8f59de28 801b7308 0081c007 00000000 00000000 00000000 00000000 00000000 00000000 00000000 8fc2bbb4 00000001 0000001d 0000000b 77f038cc 7fe80648 ffffffff ffffffff 00000000 00000001 0016e000 00000000 ... 
[ 7.960000] Call Trace: [ 7.960000] [<801041ec>] thread_saved_pc+0x2c/0x38 [ 7.960000] [<80104248>] get_wchan+0x48/0xac The disassembly of thread_saved_pc points to the following: 000006d0 : 6d0: 8c820208 lw v0,520(a0) 6d4: 3c030000 lui v1,0x0 6d8: 24630000 addiu v1,v1,0 6dc: 10430008 beq v0,v1,700 6e0: 00000000 nop 6e4: 3c020000 lui v0,0x0 6e8: 8c43000c lw v1,12(v0) 6ec: 04620004 bltzl v1,700 6f0: 00001021 move v0,zero 6f4: 8c840200 lw a0,512(a0) 6f8: 00031080 sll v0,v1,0x2 6fc: 7c44100a lwx v0,a0(v0) <------------ 700: 03e00008 jr ra 704: 00000000 nop If we specifically disable -mdsp/-mdspr2 for arch/mips/kernel/process.o, we get the following (non-crashing) assembly: 00000708 : 708: 8c820208 lw v0,520(a0) 70c: 3c030000 lui v1,0x0 710: 24630000 addiu v1,v1,0 714: 10430009 beq v0,v1,73c 718: 00000000 nop 71c: 3c020000 lui v0,0x0 720: 8c42000c lw v0,12(v0) 724: 04420005 bltzl v0,73c 728: 00001021 move v0,zero 72c: 8c830200 lw v1,512(a0) 730: 00021080 sll v0,v0,0x2 734: 00431021 addu v0,v0,v1 738: 8c420000 lw v0,0(v0) 73c: 03e00008 jr ra 740: 00000000 nop The specific line that leads a different assembly being produced is: unsigned long thread_saved_pc(struct task_struct *tsk) ... return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset]; <--- The problem here is that the compiler was given the right to use DSP instructions with the -mdsp / -mdspr2 command-line switches and performed some optimization for us and used DSP ASE instructions where we are not checking that the running CPU actually supports DSP ASE. This patch fixes the issue by partially reverting commit 32a7ede for arch/mips/kernel/Makefile in order to remove the -mdsp / -mdspr2 compiler command-line switches such that we are now guaranteed that the compiler will not optimize using DSP ASE reserved instructions. We also need to fixup the rddsp/wrdsp and m{t,h}{hi,lo}{0,1,2,3} macros in arch/mips/include/asm/mipsregs.h to tell the assembler that we are going to explicitely use DSP ASE reserved instructions. The comment in arch/mips/kernel/Makefile is also updated to reflect that. Signed-off-by: Florian Fainelli Acked-by: Steven J. 
Hill Cc: linux-mips@linux-mips.org Cc: blogic@openwrt.org Signed-off-by: Ralf Baechle --- arch/mips/include/asm/mipsregs.h | 209 +++++++++++++++++++++++++++++++++++---- arch/mips/kernel/Makefile | 25 ++--- 2 files changed, 196 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 12b70c25906a..0da44d422f5b 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1166,7 +1166,10 @@ do { \ unsigned int __dspctl; \ \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " rddsp %0, %x1 \n" \ + " .set pop \n" \ : "=r" (__dspctl) \ : "i" (mask)); \ __dspctl; \ @@ -1175,30 +1178,198 @@ do { \ #define wrdsp(val, mask) \ do { \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " wrdsp %0, %x1 \n" \ + " .set pop \n" \ : \ : "r" (val), "i" (mask)); \ } while (0) -#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) -#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) -#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) -#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) - -#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) -#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) -#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) -#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) - -#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) -#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) -#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) -#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) - -#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) -#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) -#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) -#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) +#define mflo0() \ +({ \ + long mflo0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mflo0)); \ + mflo0; \ +}) + +#define mflo1() \ +({ \ + long mflo1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mflo1)); \ + mflo1; \ +}) + +#define mflo2() \ +({ \ + long mflo2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mflo2)); \ + mflo2; \ +}) + +#define mflo3() \ +({ \ + long mflo3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mflo3)); \ + mflo3; \ +}) + +#define mfhi0() \ +({ \ + long mfhi0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mfhi0)); \ + mfhi0; \ +}) + +#define mfhi1() \ +({ \ + long mfhi1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mfhi1)); \ + mfhi1; \ +}) + +#define mfhi2() \ +({ \ + long mfhi2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mfhi2)); \ + mfhi2; \ +}) + +#define mfhi3() \ +({ \ + long mfhi3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mfhi3)); \ + mfhi3; \ +}) + + +#define mtlo0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo1(x) \ +({ 
\ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) #else diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f81d98f6184c..de75fb50562b 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o # -# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe -# to enable DSP assembler support here even if the MIPS Release 2 CPU we -# are targetting does not support DSP because all code-paths making use of -# it properly check that the running CPU *actually does* support these -# instructions. +# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not +# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches +# here because the compiler may use DSP ASE instructions (such as lwx) in +# code paths where we cannot check that the CPU we are running on supports it. +# Proper abstraction using HAVE_AS_DSP and macros is done in +# arch/mips/include/asm/mipsregs.h. # ifeq ($(CONFIG_CPU_MIPSR2), y) CFLAGS_DSP = -DHAVE_AS_DSP -# -# Check if assembler supports DSP ASE -# -ifeq ($(call cc-option-yn,-mdsp), y) -CFLAGS_DSP += -mdsp -endif - -# -# Check if assembler supports DSP ASE Rev2 -# -ifeq ($(call cc-option-yn,-mdspr2), y) -CFLAGS_DSP += -mdspr2 -endif - CFLAGS_signal.o = $(CFLAGS_DSP) CFLAGS_signal32.o = $(CFLAGS_DSP) CFLAGS_process.o = $(CFLAGS_DSP) -- cgit v1.2.3 From 97367519d7dac94de566796f034d5f72cbc671f7 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Tue, 19 Mar 2013 13:08:27 +0000 Subject: MIPS: BCM63XX: Make nvram checksum failure non fatal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some vendors modify the nvram layout moving the checksum to a different place or dropping entirely, so reduce the checksum failure to a warning. 
Reported-by: Álvaro Fernández Rojas Signed-off-by: Jonas Gorski Cc: linux-mips@linux-mips.org Cc: John Crispin Cc: Maxime Bizon Cc: Florian Fainelli Cc: Kevin Cernekee Signed-off-by: Ralf Baechle --- arch/mips/bcm63xx/boards/board_bcm963xx.c | 5 +---- arch/mips/bcm63xx/nvram.c | 7 +++---- arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h | 4 +--- 3 files changed, 5 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ed1949c29508..9aa7d44898ed 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -745,10 +745,7 @@ void __init board_prom_init(void) strcpy(cfe_version, "unknown"); printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); - if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { - printk(KERN_ERR PFX "invalid nvram checksum\n"); - return; - } + bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); board_name = bcm63xx_nvram_get_name(); /* find board by name */ diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index 620611680839..a4b8864f9307 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c @@ -38,7 +38,7 @@ struct bcm963xx_nvram { static struct bcm963xx_nvram nvram; static int mac_addr_used; -int __init bcm63xx_nvram_init(void *addr) +void __init bcm63xx_nvram_init(void *addr) { unsigned int check_len; u32 crc, expected_crc; @@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr) crc = crc32_le(~0, (u8 *)&nvram, check_len); if (crc != expected_crc) - return -EINVAL; - - return 0; + pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", + expected_crc, crc); } u8 *bcm63xx_nvram_get_name(void) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h index 62d6a3b4d3b7..4e0b6bc1165e 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h @@ -9,10 +9,8 @@ * * Initialized the local nvram copy from the target address and checks * its checksum. - * - * Returns 0 on success. */ -int __init bcm63xx_nvram_init(void *nvram); +void bcm63xx_nvram_init(void *nvram); /** * bcm63xx_nvram_get_name() - returns the board name according to nvram -- cgit v1.2.3 From 1762c5ab7ce5f482aafaf9b6a721e41f073ae3cb Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Tue, 19 Mar 2013 13:20:19 +0000 Subject: Revert "MIPS: BCM63XX: Call board_register_device from device_initcall()" This commit causes a race between PCI scan and SSB fallback SPROM handler registration, causing the wifi to not work on slower systems. The only subsystem touched from board_register_devices is platform device registration, which is safe as an arch init call. This reverts commit d64ed7ada2f689d2c62af1892ca55e47d3653e36 [MIPS: BCM63XX: Call board_register_device from device_initcall()]. 
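For context, a schematic of the ordering involved (an editorial sketch, simplified from the function the diff below touches, not a verbatim copy): initcall levels run in a fixed sequence during boot, so registering the board devices at arch_initcall makes the registration, including the SSB fallback SPROM handler, happen earlier than it did at device_initcall time, ahead of the PCI scan it was racing with.

	#include <linux/init.h>

	static int __init bcm63xx_register_devices(void)
	{
		/* platform devices and the SSB fallback SPROM handler are
		 * registered here, early in the boot sequence */
		return 0;
	}
	/* arch_initcall() entries always run before device_initcall() entries */
	arch_initcall(bcm63xx_register_devices);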
Signed-off-by: Jonas Gorski To: linux-mips@linux-mips.org Cc: John Crispin Cc: Maxime Bizon Cc: Florian Fainelli Cc: Kevin Cernekee Signed-off-by: Ralf Baechle --- arch/mips/bcm63xx/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 314231be788c..35e18e98beb9 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void) return board_register_devices(); } -device_initcall(bcm63xx_register_devices); +arch_initcall(bcm63xx_register_devices); -- cgit v1.2.3 From 2a1486981c1317dc4f4aad568f2cc6e49dfb8c82 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 19 Mar 2013 14:00:53 +0000 Subject: Fix breakage in MIPS siginfo handling MIPS's siginfo handling has been broken since this commit: commit 574c4866e33d648520a8bd5bf6f573ea6e554e88 Author: Al Viro Date: Sun Nov 25 22:24:19 2012 -0500 consolidate kernel-side struct sigaction declarations for 64-bit BE MIPS CPUs. The UAPI variant looks like this: struct sigaction { unsigned int sa_flags; __sighandler_t sa_handler; sigset_t sa_mask; }; but the core kernel's variant looks like this: struct sigaction { #ifndef __ARCH_HAS_ODD_SIGACTION __sighandler_t sa_handler; unsigned long sa_flags; #else unsigned long sa_flags; __sighandler_t sa_handler; #endif #ifdef __ARCH_HAS_SA_RESTORER __sigrestore_t sa_restorer; #endif sigset_t sa_mask; }; The problem is that sa_flags has been changed from an unsigned int to an unsigned long. Fix this by making sa_flags unsigned int if __ARCH_HAS_ODD_SIGACTION is defined. Whilst we're at it, rename __ARCH_HAS_ODD_SIGACTION to __ARCH_HAS_IRIX_SIGACTION. Signed-off-by: David Howells Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: stable@vger.kernel.org Acked-by: Al Viro Signed-off-by: Ralf Baechle --- arch/mips/include/asm/signal.h | 2 +- include/linux/compat.h | 4 ++-- include/linux/signal.h | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 197f6367c201..8efe5a9e2c3e 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h @@ -21,6 +21,6 @@ #include #include -#define __ARCH_HAS_ODD_SIGACTION +#define __ARCH_HAS_IRIX_SIGACTION #endif /* _ASM_SIGNAL_H */ diff --git a/include/linux/compat.h b/include/linux/compat.h index 76a87fb57ac2..377cd8c3395e 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -141,11 +141,11 @@ typedef struct { } compat_sigset_t; struct compat_sigaction { -#ifndef __ARCH_HAS_ODD_SIGACTION +#ifndef __ARCH_HAS_IRIX_SIGACTION compat_uptr_t sa_handler; compat_ulong_t sa_flags; #else - compat_ulong_t sa_flags; + compat_uint_t sa_flags; compat_uptr_t sa_handler; #endif #ifdef __ARCH_HAS_SA_RESTORER diff --git a/include/linux/signal.h b/include/linux/signal.h index a2dcb94ea49d..9475c5cb28bc 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -250,11 +250,11 @@ extern int show_unhandled_signals; extern int sigsuspend(sigset_t *); struct sigaction { -#ifndef __ARCH_HAS_ODD_SIGACTION +#ifndef __ARCH_HAS_IRIX_SIGACTION __sighandler_t sa_handler; unsigned long sa_flags; #else - unsigned long sa_flags; + unsigned int sa_flags; __sighandler_t sa_handler; #endif #ifdef __ARCH_HAS_SA_RESTORER -- cgit v1.2.3 From c83a9d5e425d4678b05ca058fec6254f18601474 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 19 Mar 2013 08:04:44 -0700 Subject: x86-32, 
microcode_intel_early: Fix crash with CONFIG_DEBUG_VIRTUAL In 32-bit, __pa_symbol() in CONFIG_DEBUG_VIRTUAL accesses kernel data (e.g. max_low_pfn) that not only hasn't been setup yet in such early boot phase, but since we are in linear mode, cannot even be detected as uninitialized. Thus, use __pa_nodebug() rather than __pa_symbol() to get a global symbol's physical address. Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1363705484-27645-1-git-send-email-fenghua.yu@intel.com Reported-and-tested-by: Tetsuo Handa Signed-off-by: H. Peter Anvin --- arch/x86/kernel/microcode_intel_early.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c index 7890bc838952..5992ee8086b7 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/microcode_intel_early.c @@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp, struct microcode_intel ***mc_saved; mc_saved = (struct microcode_intel ***) - __pa_symbol(&mc_saved_data->mc_saved); + __pa_nodebug(&mc_saved_data->mc_saved); for (i = 0; i < mc_saved_data->mc_saved_count; i++) { struct microcode_intel *p; p = *(struct microcode_intel **) - __pa(mc_saved_data->mc_saved + i); - mc_saved_tmp[i] = (struct microcode_intel *)__pa(p); + __pa_nodebug(mc_saved_data->mc_saved + i); + mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p); } } #endif @@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end, struct cpio_data cd; long offset = 0; #ifdef CONFIG_X86_32 - char *p = (char *)__pa_symbol(ucode_name); + char *p = (char *)__pa_nodebug(ucode_name); #else char *p = ucode_name; #endif @@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci) if (mc_intel == NULL) return; - delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info); - current_mc_date_p = (int *)__pa_symbol(&current_mc_date); + delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); + current_mc_date_p = (int *)__pa_nodebug(&current_mc_date); *delay_ucode_info_p = 1; *current_mc_date_p = mc_intel->hdr.date; @@ -741,15 +741,15 @@ load_ucode_intel_bsp(void) #ifdef CONFIG_X86_32 struct boot_params *boot_params_p; - boot_params_p = (struct boot_params *)__pa_symbol(&boot_params); + boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params); ramdisk_image = boot_params_p->hdr.ramdisk_image; ramdisk_size = boot_params_p->hdr.ramdisk_size; initrd_start_early = ramdisk_image; initrd_end_early = initrd_start_early + ramdisk_size; _load_ucode_intel_bsp( - (struct mc_saved_data *)__pa_symbol(&mc_saved_data), - (unsigned long *)__pa_symbol(&mc_saved_in_initrd), + (struct mc_saved_data *)__pa_nodebug(&mc_saved_data), + (unsigned long *)__pa_nodebug(&mc_saved_in_initrd), initrd_start_early, initrd_end_early, &uci); #else ramdisk_image = boot_params.hdr.ramdisk_image; @@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void) unsigned long *initrd_start_p; mc_saved_in_initrd_p = - (unsigned long *)__pa_symbol(mc_saved_in_initrd); - mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data); - initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start); - initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p); + (unsigned long *)__pa_nodebug(mc_saved_in_initrd); + mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data); + initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start); + initrd_start_addr = (unsigned 
long)__pa_nodebug(*initrd_start_p); #else mc_saved_data_p = &mc_saved_data; mc_saved_in_initrd_p = mc_saved_in_initrd; -- cgit v1.2.3 From 367f3fcd9296977bc4689546f55c5f4a9c680e8d Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Wed, 20 Mar 2013 16:53:14 +0530 Subject: ARC: Fix the typo in event identifier flags used by ptrace orig_r8_IS_EXCPN and orig_r8_IS_BRKPT were same values due to a copy/paste error. Although it looks bad and is wrong, it really doesn't affect gdb working. orig_r8_IS_BRKPT is the one relevant to debugging (breakpoints), since it is used to provide EFA vs. ERET to a ptrace "stop_pc" request. So when gdb has inserted a breakpoint, orig_r8_IS_BRKPT is already set, and anything else (i.e. orig_r8_IS_EXCPN) becoming same as it, really doesn't hurt gdb. The corollary case, could be nasty but nobody uses the ptrace "stop_pc" request in that case Signed-off-by: Vineet Gupta --- arch/arc/include/asm/entry.h | 2 +- arch/arc/include/asm/ptrace.h | 2 +- arch/arc/kernel/entry.S | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h index 23daa326fc9b..eb2ae53187d9 100644 --- a/arch/arc/include/asm/entry.h +++ b/arch/arc/include/asm/entry.h @@ -415,7 +415,7 @@ *-------------------------------------------------------------*/ .macro SAVE_ALL_EXCEPTION marker - st \marker, [sp, 8] + st \marker, [sp, 8] /* orig_r8 */ st r0, [sp, 4] /* orig_r0, needed only for sys calls */ /* Restore r9 used to code the early prologue */ diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 8ae783d20a81..6179de7e07c2 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs) #define orig_r8_IS_SCALL 0x0001 #define orig_r8_IS_SCALL_RESTARTED 0x0002 #define orig_r8_IS_BRKPT 0x0004 -#define orig_r8_IS_EXCPN 0x0004 +#define orig_r8_IS_EXCPN 0x0008 #define orig_r8_IS_IRQ1 0x0010 #define orig_r8_IS_IRQ2 0x0020 diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index b9d875a441cc..91eeab81f52d 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -452,7 +452,7 @@ tracesys: ; using ERET won't work since next-PC has already committed lr r12, [efa] GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 - st r12, [r11, THREAD_FAULT_ADDR] + st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address ; PRE Sys Call Ptrace hook mov r0, sp ; pt_regs needed -- cgit v1.2.3 From 0e48026ae7abf871e51eaa9183c81ab5bef4c267 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 19 Mar 2013 16:10:38 +0100 Subject: perf/x86: Fix uninitialized pt_regs in intel_pmu_drain_bts_buffer() This patch fixes an uninitialized pt_regs struct in drain BTS function. The pt_regs struct is propagated all the way to the code_get_segment() function from perf_instruction_pointer() and may get garbage. We cannot simply inherit the actual pt_regs from the interrupt because BTS must be flushed on context-switch or when the associated event is disabled. And there we do not have a pt_regs handy. Setting pt_regs to all zeroes may not be the best option but it is not clear what else to do given where the drain_bts_buffer() is called from. In V2, we move the memset() later in the code to avoid doing it when we end up returning early without doing the actual BTS processing. Also dropped the reg.val initialization because it is redundant with the memset() as suggested by PeterZ. 
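As a standalone illustration of the hazard the memset() closes (plain userspace C with made-up struct and field names, not the kernel code itself):

#include <stdio.h>
#include <string.h>

struct sample_regs { unsigned long ip, sp, cs; };

static void fill_sample(struct sample_regs *out)
{
	struct sample_regs regs;

	memset(&regs, 0, sizeof(regs));	/* every field is now deterministic */
	regs.ip = 0;			/* the old code only ever set ip */
	*out = regs;			/* consumers no longer see stack garbage in sp/cs */
}

int main(void)
{
	struct sample_regs r;

	fill_sample(&r);
	printf("ip=%lu sp=%lu cs=%lu\n", r.ip, r.sp, r.cs);
	return 0;
}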
Signed-off-by: Stephane Eranian Acked-by: Peter Zijlstra Cc: peterz@infradead.org Cc: sqazi@google.com Cc: ak@linux.intel.com Cc: jolsa@redhat.com Link: http://lkml.kernel.org/r/20130319151038.GA25439@quad Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_ds.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 826054a4f2ee..f71c9f09fd4b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void) if (top <= at) return 0; + memset(&regs, 0, sizeof(regs)); + ds->bts_index = ds->bts_buffer_base; perf_sample_data_init(&data, 0, event->hw.last_period); - regs.ip = 0; /* * Prepare a generic sample, i.e. fill in the invariant fields. -- cgit v1.2.3 From b7fef2dd7217d9e3f35c948e87297451e55c9709 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 21 Mar 2013 08:24:11 +0100 Subject: s390/uaccess: fix clear_user_pt() The page table walker variant of clear_user() is supposed to copy the contents of the empty zero page to user space. However since 238ec4ef "[S390] zero page cache synonyms" empty_zero_page is not anymore the page itself but contains the pointer to the empty zero pages. Therefore the page table walker variant of clear_user() copied the address of the first empty zero page and afterwards more or less random data to user space instead of clearing the given user space range. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/lib/uaccess_pt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dff631d34b45..6771fdd89377 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -197,7 +197,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from) static size_t clear_user_pt(size_t n, void __user *to) { - void *zpage = &empty_zero_page; + void *zpage = (void *) empty_zero_page; long done, size, ret; done = 0; -- cgit v1.2.3 From f564c24103f87dc740c1c293c975565ac46b12ef Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Thu, 21 Mar 2013 17:32:36 -0700 Subject: x86, microcode_intel_early: Mark apply_microcode_early() as cpuinit Add missing __cpuinit annotation to apply_microcode_early(). Reported-by: Shaun Ruffell Cc: Fenghua Yu Link: http://lkml.kernel.org/r/20130320170310.GA23362@digium.com Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/microcode_intel_early.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c index 5992ee8086b7..d893e8ed8ac9 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/microcode_intel_early.c @@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) } #endif -static int apply_microcode_early(struct mc_saved_data *mc_saved_data, - struct ucode_cpu_info *uci) +static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, + struct ucode_cpu_info *uci) { struct microcode_intel *mc_intel; unsigned int val[2]; -- cgit v1.2.3 From 909b3fdb0dd4f3db07b2d75425a00a2adb551383 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 12 Mar 2013 15:06:23 +0000 Subject: xen-pciback: notify hypervisor about devices intended to be assigned to guests For MSI-X capable devices the hypervisor wants to write protect the MSI-X table and PBA, yet it can't assume that resources have been assigned to their final values at device enumeration time. Thus have pciback do that notification, as having the device controlled by it is a prerequisite to assigning the device to guests anyway. This is the kernel part of hypervisor side commit 4245d33 ("x86/MSI: add mechanism to fully protect MSI-X table from PV guest accesses") on the master branch of git://xenbits.xen.org/xen.git. CC: stable@vger.kernel.org Signed-off-by: Jan Beulich Signed-off-by: Konrad Rzeszutek Wilk --- arch/x86/include/asm/xen/hypercall.h | 4 +-- drivers/xen/fallback.c | 3 +- drivers/xen/xen-pciback/pci_stub.c | 59 +++++++++++++++++++++++++++--------- include/xen/interface/physdev.h | 6 ++++ 4 files changed, 54 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index c20d1ce62dc6..e709884d0ef9 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str) return _hypercall3(int, console_io, cmd, count, str); } -extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); +extern int __must_check xen_physdev_op_compat(int, void *); static inline int HYPERVISOR_physdev_op(int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); if (unlikely(rc == -ENOSYS)) - rc = HYPERVISOR_physdev_op_compat(cmd, arg); + rc = xen_physdev_op_compat(cmd, arg); return rc; } diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c index 0ef7c4d40f86..b04fb64c5a91 100644 --- a/drivers/xen/fallback.c +++ b/drivers/xen/fallback.c @@ -44,7 +44,7 @@ int xen_event_channel_op_compat(int cmd, void *arg) } EXPORT_SYMBOL_GPL(xen_event_channel_op_compat); -int HYPERVISOR_physdev_op_compat(int cmd, void *arg) +int xen_physdev_op_compat(int cmd, void *arg) { struct physdev_op op; int rc; @@ -78,3 +78,4 @@ int HYPERVISOR_physdev_op_compat(int cmd, void *arg) return rc; } +EXPORT_SYMBOL_GPL(xen_physdev_op_compat); diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 9204126f1560..a2278ba7fb27 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "pciback.h" #include "conf_space.h" #include "conf_space_quirks.h" @@ -85,37 +86,52 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) static void 
pcistub_device_release(struct kref *kref) { struct pcistub_device *psdev; + struct pci_dev *dev; struct xen_pcibk_dev_data *dev_data; psdev = container_of(kref, struct pcistub_device, kref); - dev_data = pci_get_drvdata(psdev->dev); + dev = psdev->dev; + dev_data = pci_get_drvdata(dev); - dev_dbg(&psdev->dev->dev, "pcistub_device_release\n"); + dev_dbg(&dev->dev, "pcistub_device_release\n"); - xen_unregister_device_domain_owner(psdev->dev); + xen_unregister_device_domain_owner(dev); /* Call the reset function which does not take lock as this * is called from "unbind" which takes a device_lock mutex. */ - __pci_reset_function_locked(psdev->dev); - if (pci_load_and_free_saved_state(psdev->dev, - &dev_data->pci_saved_state)) { - dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n"); - } else - pci_restore_state(psdev->dev); + __pci_reset_function_locked(dev); + if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state)) + dev_dbg(&dev->dev, "Could not reload PCI state\n"); + else + pci_restore_state(dev); + + if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { + struct physdev_pci_device ppdev = { + .seg = pci_domain_nr(dev->bus), + .bus = dev->bus->number, + .devfn = dev->devfn + }; + int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix, + &ppdev); + + if (err) + dev_warn(&dev->dev, "MSI-X release failed (%d)\n", + err); + } /* Disable the device */ - xen_pcibk_reset_device(psdev->dev); + xen_pcibk_reset_device(dev); kfree(dev_data); - pci_set_drvdata(psdev->dev, NULL); + pci_set_drvdata(dev, NULL); /* Clean-up the device */ - xen_pcibk_config_free_dyn_fields(psdev->dev); - xen_pcibk_config_free_dev(psdev->dev); + xen_pcibk_config_free_dyn_fields(dev); + xen_pcibk_config_free_dev(dev); - psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; - pci_dev_put(psdev->dev); + dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; + pci_dev_put(dev); kfree(psdev); } @@ -355,6 +371,19 @@ static int pcistub_init_device(struct pci_dev *dev) if (err) goto config_release; + if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { + struct physdev_pci_device ppdev = { + .seg = pci_domain_nr(dev->bus), + .bus = dev->bus->number, + .devfn = dev->devfn + }; + + err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev); + if (err) + dev_err(&dev->dev, "MSI-X preparation failed (%d)\n", + err); + } + /* We need the device active to save the state. */ dev_dbg(&dev->dev, "save state of device\n"); pci_save_state(dev); diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h index 1844d31f4552..7000bb1f6e96 100644 --- a/include/xen/interface/physdev.h +++ b/include/xen/interface/physdev.h @@ -251,6 +251,12 @@ struct physdev_pci_device_add { #define PHYSDEVOP_pci_device_remove 26 #define PHYSDEVOP_restore_msi_ext 27 +/* + * Dom0 should use these two to announce MMIO resources assigned to + * MSI-X capable devices won't (prepare) or may (release) change. + */ +#define PHYSDEVOP_prepare_msix 30 +#define PHYSDEVOP_release_msix 31 struct physdev_pci_device { /* IN */ uint16_t seg; -- cgit v1.2.3 From e651eab0af88aa7a281fe9e8c36c0846552aa7fc Mon Sep 17 00:00:00 2001 From: Sricharan R Date: Mon, 18 Mar 2013 12:24:04 +0100 Subject: ARM: 7677/1: LPAE: Fix mapping in alloc_init_section for unaligned addresses With LPAE enabled, alloc_init_section() does not map the entire address space for unaligned addresses. The issue also reproduced with CMA + LPAE. CMA tries to map 16MB with page granularity mappings during boot. alloc_init_pte() is called and out of 16MB, only 2MB gets mapped and rest remains unaccessible. 
Because of this OMAP5 boot is broken with CMA + LPAE enabled. Fix the issue by ensuring that the entire addresses are mapped. Signed-off-by: R Sricharan Cc: Catalin Marinas Cc: Christoffer Dall Cc: Santosh Shilimkar Tested-by: Laura Abbott Acked-by: Catalin Marinas Acked-by: Christoffer Dall Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 73 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 47 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e95a996ab78f..78978945492a 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } while (pte++, addr += PAGE_SIZE, addr != end); } -static void __init alloc_init_section(pud_t *pud, unsigned long addr, - unsigned long end, phys_addr_t phys, - const struct mem_type *type) +static void __init map_init_section(pmd_t *pmd, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) { - pmd_t *pmd = pmd_offset(pud, addr); - +#ifndef CONFIG_ARM_LPAE /* - * Try a section mapping - end, addr and phys must all be aligned - * to a section boundary. Note that PMDs refer to the individual - * L1 entries, whereas PGDs refer to a group of L1 entries making - * up one logical pointer to an L2 table. + * In classic MMU format, puds and pmds are folded in to + * the pgds. pmd_offset gives the PGD entry. PGDs refer to a + * group of L1 entries making up one logical pointer to + * an L2 table (2MB), where as PMDs refer to the individual + * L1 entries (1MB). Hence increment to get the correct + * offset for odd 1MB sections. + * (See arch/arm/include/asm/pgtable-2level.h) */ - if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { - pmd_t *p = pmd; - -#ifndef CONFIG_ARM_LPAE - if (addr & SECTION_SIZE) - pmd++; + if (addr & SECTION_SIZE) + pmd++; #endif + do { + *pmd = __pmd(phys | type->prot_sect); + phys += SECTION_SIZE; + } while (pmd++, addr += SECTION_SIZE, addr != end); - do { - *pmd = __pmd(phys | type->prot_sect); - phys += SECTION_SIZE; - } while (pmd++, addr += SECTION_SIZE, addr != end); + flush_pmd_entry(pmd); +} - flush_pmd_entry(p); - } else { +static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) +{ + pmd_t *pmd = pmd_offset(pud, addr); + unsigned long next; + + do { /* - * No need to loop; pte's aren't interested in the - * individual L1 entries. + * With LPAE, we must loop over to map + * all the pmds for the given range. */ - alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); - } + next = pmd_addr_end(addr, end); + + /* + * Try a section mapping - addr, next and phys must all be + * aligned to a section boundary. 
+ */ + if (type->prot_sect && + ((addr | next | phys) & ~SECTION_MASK) == 0) { + map_init_section(pmd, addr, next, phys, type); + } else { + alloc_init_pte(pmd, addr, next, + __phys_to_pfn(phys), type); + } + + phys += next - addr; + + } while (pmd++, addr = next, addr != end); } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, @@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, do { next = pud_addr_end(addr, end); - alloc_init_section(pud, addr, next, phys, type); + alloc_init_pmd(pud, addr, next, phys, type); phys += next - addr; } while (pud++, addr = next, addr != end); } -- cgit v1.2.3 From c40e3641670eb6ebfdb71d4b0c775416ef95f4f0 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 18 Mar 2013 19:44:14 +0100 Subject: ARM: 7679/1: Clear IDIVT hwcap if CONFIG_ARM_THUMB=n Don't advertise support for the SDIV/UDIV thumb instructions if the kernel is not compiled with support for thumb userspace. This is in line with how we remove the THUMB hwcap in these configurations. Acked-by: Will Deacon Cc: Stepan Moskovchenko Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 3f6cbb2e3eda..e2c8bbffb0b1 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -484,7 +484,7 @@ static void __init setup_processor(void) list->elf_name, ENDIANNESS); elf_hwcap = list->elf_hwcap; #ifndef CONFIG_ARM_THUMB - elf_hwcap &= ~HWCAP_THUMB; + elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); #endif feat_v6_fixup(); -- cgit v1.2.3 From 8164f7af88d9ad3a757bd14f634b23997ee77f6b Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 18 Mar 2013 19:44:15 +0100 Subject: ARM: 7680/1: Detect support for SDIV/UDIV from ISAR0 register The ISAR0 register indicates support for the SDIV and UDIV instructions in both the Thumb and ARM instruction set. Read the register to detect the supported instructions and update the elf_hwcap mask as appropriate. This is better than adding more and more cpuid checks in proc-v7.S for each new cpu variant that supports these instructions. Acked-by: Will Deacon Cc: Stepan Moskovchenko Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 20 ++++++++++++++++++++ arch/arm/mm/proc-v7.S | 4 ++-- 2 files changed, 22 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index e2c8bbffb0b1..f3ac13f69b7a 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -353,6 +353,23 @@ void __init early_print(const char *str, ...) 
printk("%s", buf); } +static void __init cpuid_init_hwcaps(void) +{ + unsigned int divide_instrs; + + if (cpu_architecture() < CPU_ARCH_ARMv7) + return; + + divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; + + switch (divide_instrs) { + case 2: + elf_hwcap |= HWCAP_IDIVA; + case 1: + elf_hwcap |= HWCAP_IDIVT; + } +} + static void __init feat_v6_fixup(void) { int id = read_cpuid_id(); @@ -483,6 +500,9 @@ static void __init setup_processor(void) snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", list->elf_name, ENDIANNESS); elf_hwcap = list->elf_hwcap; + + cpuid_init_hwcaps(); + #ifndef CONFIG_ARM_THUMB elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); #endif diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a3c015f8d5c..bcd3d48922fb 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -420,7 +420,7 @@ __v7_pj4b_proc_info: __v7_ca7mp_proc_info: .long 0x410fc070 .long 0xff0ffff0 - __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca7mp_setup .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info /* @@ -430,7 +430,7 @@ __v7_ca7mp_proc_info: __v7_ca15mp_proc_info: .long 0x410fc0f0 .long 0xff0ffff0 - __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca15mp_setup .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info /* -- cgit v1.2.3 From 120ecfafabec382c4feb79ff159ef42a39b6d33b Mon Sep 17 00:00:00 2001 From: Stepan Moskovchenko Date: Mon, 18 Mar 2013 19:44:16 +0100 Subject: ARM: 7678/1: Work around faulty ISAR0 register in some Krait CPUs Some early versions of the Krait CPU design incorrectly indicate that they only support the UDIV and SDIV instructions in Thumb mode when they actually support them in ARM and Thumb mode. It seems that these CPUs follow the DDI0406B ARM ARM which has two possible values for the divide instructions field, instead of the DDI0406C document which has three possible values. Work around this problem by checking the MIDR against Krait CPUs with this faulty ISAR0 register and force the hwcaps to indicate support in both modes. [sboyd: Rewrote commit text to reflect real reasoning now that we autodetect udiv/sdiv] Signed-off-by: Stepan Moskovchenko Acked-by: Will Deacon Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/mm/proc-v7.S | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index bcd3d48922fb..f584d3f5b37c 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -433,6 +433,21 @@ __v7_ca15mp_proc_info: __v7_proc __v7_ca15mp_setup .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info + /* + * Qualcomm Inc. Krait processors. + */ + .type __krait_proc_info, #object +__krait_proc_info: + .long 0x510f0400 @ Required ID value + .long 0xff0ffc00 @ Mask for ID + /* + * Some Krait processors don't indicate support for SDIV and UDIV + * instructions in the ARM instruction set, even though they actually + * do support them. + */ + __v7_proc __v7_setup, hwcaps = HWCAP_IDIV + .size __krait_proc_info, . - __krait_proc_info + /* * Match any ARMv7 processor core. */ -- cgit v1.2.3 From 68a154fc53ddd3f7b33e482847a411bf54a50855 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 20 Mar 2013 17:30:30 +0100 Subject: ARM: 7681/1: hw_breakpoint: use warn_once to avoid spam from reset_ctrl_regs() CPU debug features like hardware break, watchpoints can be used only when the debug mode is enabled and available. 
Unfortunately on OMAP4 based devices, after a CPU power cycle, the debug feature gets disabled which leads to a flood of messages coming from reset_ctrl_regs() which gets called on every CPU_PM_EXIT with CPUidle enabled. So make use of warn_once() so that system is usable. Thanks to Will for pointers and Lokesh for the analysis of the issue. Tested-by: Lokesh Vutla Signed-off-by: Santosh Shilimkar Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/hw_breakpoint.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 96093b75ab90..5dc1aa6f0f7d 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused) } if (err) { - pr_warning("CPU %d debug is powered down!\n", cpu); + pr_warn_once("CPU %d debug is powered down!\n", cpu); cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); return; } @@ -987,7 +987,7 @@ clear_vcr: isb(); if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to disable vector catch\n", cpu); + pr_warn_once("CPU %d failed to disable vector catch\n", cpu); return; } @@ -1007,7 +1007,7 @@ clear_vcr: } if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to clear debug register pairs\n", cpu); + pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu); return; } -- cgit v1.2.3 From 4080d2d11a2d572228c2b8d02406e997b87ba6a5 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 14 Mar 2013 20:31:37 -0700 Subject: ARM: msm: Stop counting before reprogramming clockevent If the clockevent is forcibly reprogrammed to have a different match value we mistakenly assume the timer is not ticking and program a new match value while the timer is running. Although we clear the timer before programming a new match, it's better to stop the timer before clearing it so that we're sure the proper amount of ticks are counted. Failure to do so can lead to missed ticks and system hangs. Signed-off-by: Stephen Boyd Signed-off-by: David Brown --- arch/arm/mach-msm/timer.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 2969027f02fa..f9fd77e8f1f5 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles, { u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); - writel_relaxed(0, event_base + TIMER_CLEAR); + ctrl &= ~TIMER_ENABLE_EN; + writel_relaxed(ctrl, event_base + TIMER_ENABLE); + + writel_relaxed(ctrl, event_base + TIMER_CLEAR); writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); return 0; -- cgit v1.2.3 From 57471c8d3c22873f70813820e6b4d2d1fea9629d Mon Sep 17 00:00:00 2001 From: Laxman Dewangan Date: Fri, 22 Mar 2013 12:35:06 -0600 Subject: ARM: tegra: fix register address of slink controller Fix typo on register address of slink3 controller where register address is wrongly set as 0x7000d480 but it is 0x7000d800. 
Signed-off-by: Laxman Dewangan Cc: Signed-off-by: Stephen Warren Signed-off-by: Arnd Bergmann --- arch/arm/boot/dts/tegra20.dtsi | 2 +- arch/arm/boot/dts/tegra30.dtsi | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 48d00a099ce3..3d3f64d2111a 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -385,7 +385,7 @@ spi@7000d800 { compatible = "nvidia,tegra20-slink"; - reg = <0x7000d480 0x200>; + reg = <0x7000d800 0x200>; interrupts = <0 83 0x04>; nvidia,dma-request-selector = <&apbdma 17>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index 9d87a3ffe998..dbf46c272562 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -372,7 +372,7 @@ spi@7000d800 { compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink"; - reg = <0x7000d480 0x200>; + reg = <0x7000d800 0x200>; interrupts = <0 83 0x04>; nvidia,dma-request-selector = <&apbdma 17>; #address-cells = <1>; -- cgit v1.2.3 From 087aa036eb79f24b856893190359ba812b460f45 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Mon, 25 Mar 2013 09:31:31 +0800 Subject: powerpc: make additional room in exception vector area The FWNMI region is fixed at 0x7000 and the vector are now overflowing that with allmodconfig. Fix that by moving slb_miss_realmode code out of that region as it doesn't need to be that close to the call sites (it is a _GLOBAL function) Fixes this build error: arch/powerpc/kernel/exceptions-64s.S: Assembler messages: arch/powerpc/kernel/exceptions-64s.S:1304: Error: attempt to move .org backwards Signed-off-by: Chen Gang Signed-off-by: Stephen Rothwell --- arch/powerpc/kernel/exceptions-64s.S | 144 +++++++++++++++++------------------ 1 file changed, 72 insertions(+), 72 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 200afa5bcfb7..56bd92362ce1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1066,78 +1066,6 @@ unrecov_user_slb: #endif /* __DISABLED__ */ -/* - * r13 points to the PACA, r9 contains the saved CR, - * r12 contain the saved SRR1, SRR0 is still ready for return - * r3 has the faulting address - * r9 - r13 are saved in paca->exslb. - * r3 is saved in paca->slb_r3 - * We assume we aren't going to take any exceptions during this procedure. - */ -_GLOBAL(slb_miss_realmode) - mflr r10 -#ifdef CONFIG_RELOCATABLE - mtctr r11 -#endif - - stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ - std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ - - bl .slb_allocate_realmode - - /* All done -- return from exception. */ - - ld r10,PACA_EXSLB+EX_LR(r13) - ld r3,PACA_EXSLB+EX_R3(r13) - lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ - - mtlr r10 - - andi. r10,r12,MSR_RI /* check for unrecoverable exception */ - beq- 2f - -.machine push -.machine "power4" - mtcrf 0x80,r9 - mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ -.machine pop - - RESTORE_PPR_PACA(PACA_EXSLB, r9) - ld r9,PACA_EXSLB+EX_R9(r13) - ld r10,PACA_EXSLB+EX_R10(r13) - ld r11,PACA_EXSLB+EX_R11(r13) - ld r12,PACA_EXSLB+EX_R12(r13) - ld r13,PACA_EXSLB+EX_R13(r13) - rfid - b . /* prevent speculative execution */ - -2: mfspr r11,SPRN_SRR0 - ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10,unrecov_slb) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - rfid - b . 
- -unrecov_slb: - EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) - DISABLE_INTS - bl .save_nvgprs -1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception - b 1b - - -#ifdef CONFIG_PPC_970_NAP -power4_fixup_nap: - andc r9,r9,r10 - std r9,TI_LOCAL_FLAGS(r11) - ld r10,_LINK(r1) /* make idle task do the */ - std r10,_NIP(r1) /* equivalent of a blr */ - blr -#endif - .align 7 .globl alignment_common alignment_common: @@ -1335,6 +1263,78 @@ _GLOBAL(opal_mc_secondary_handler) #endif /* CONFIG_PPC_POWERNV */ +/* + * r13 points to the PACA, r9 contains the saved CR, + * r12 contain the saved SRR1, SRR0 is still ready for return + * r3 has the faulting address + * r9 - r13 are saved in paca->exslb. + * r3 is saved in paca->slb_r3 + * We assume we aren't going to take any exceptions during this procedure. + */ +_GLOBAL(slb_miss_realmode) + mflr r10 +#ifdef CONFIG_RELOCATABLE + mtctr r11 +#endif + + stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ + std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ + + bl .slb_allocate_realmode + + /* All done -- return from exception. */ + + ld r10,PACA_EXSLB+EX_LR(r13) + ld r3,PACA_EXSLB+EX_R3(r13) + lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ + + mtlr r10 + + andi. r10,r12,MSR_RI /* check for unrecoverable exception */ + beq- 2f + +.machine push +.machine "power4" + mtcrf 0x80,r9 + mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ +.machine pop + + RESTORE_PPR_PACA(PACA_EXSLB, r9) + ld r9,PACA_EXSLB+EX_R9(r13) + ld r10,PACA_EXSLB+EX_R10(r13) + ld r11,PACA_EXSLB+EX_R11(r13) + ld r12,PACA_EXSLB+EX_R12(r13) + ld r13,PACA_EXSLB+EX_R13(r13) + rfid + b . /* prevent speculative execution */ + +2: mfspr r11,SPRN_SRR0 + ld r10,PACAKBASE(r13) + LOAD_HANDLER(r10,unrecov_slb) + mtspr SPRN_SRR0,r10 + ld r10,PACAKMSR(r13) + mtspr SPRN_SRR1,r10 + rfid + b . + +unrecov_slb: + EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) + DISABLE_INTS + bl .save_nvgprs +1: addi r3,r1,STACK_FRAME_OVERHEAD + bl .unrecoverable_exception + b 1b + + +#ifdef CONFIG_PPC_970_NAP +power4_fixup_nap: + andc r9,r9,r10 + std r9,TI_LOCAL_FLAGS(r11) + ld r10,_LINK(r1) /* make idle task do the */ + std r10,_NIP(r1) /* equivalent of a blr */ + blr +#endif + /* * Hash table stuff */ -- cgit v1.2.3 From 4cc3daaf3976bde5345718e86b67cace2f935a09 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 14 Jan 2013 20:48:55 +0000 Subject: ARM: tlbflush: remove ARMv3 support We no longer support any ARMv3 platforms, so remove the old tlbflushing code. 
Signed-off-by: Will Deacon --- arch/arm/include/asm/tlbflush.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c8820f0d..a223003b0f17 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -14,7 +14,6 @@ #include -#define TLB_V3_PAGE (1 << 0) #define TLB_V4_U_PAGE (1 << 1) #define TLB_V4_D_PAGE (1 << 2) #define TLB_V4_I_PAGE (1 << 3) @@ -22,7 +21,6 @@ #define TLB_V6_D_PAGE (1 << 5) #define TLB_V6_I_PAGE (1 << 6) -#define TLB_V3_FULL (1 << 8) #define TLB_V4_U_FULL (1 << 9) #define TLB_V4_D_FULL (1 << 10) #define TLB_V4_I_FULL (1 << 11) @@ -52,7 +50,6 @@ * ============= * * We have the following to choose from: - * v3 - ARMv3 * v4 - ARMv4 without write buffer * v4wb - ARMv4 with write buffer without I TLB flush entry instruction * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction @@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); @@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { + if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); @@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && + if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); @@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); -- cgit v1.2.3 From 05e99c8cf9d4e53ef6e016815db40a89a6156529 Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Wed, 20 Mar 2013 14:21:10 +0000 Subject: intel-pstate: Use #defines instead of hard-coded values. They are defined in coreboot (MSR_PLATFORM) and the other one is already defined in msr-index.h. Let's use those. Signed-off-by: Konrad Rzeszutek Wilk Acked-by: Viresh Kumar Acked-by: Dirk Brandewie Signed-off-by: Rafael J. 
Wysocki --- arch/x86/include/uapi/asm/msr-index.h | 1 + drivers/cpufreq/intel_pstate.c | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index 892ce40a7470..7a060f4b411f 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -44,6 +44,7 @@ #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) +#define MSR_PLATFORM_INFO 0x000000ce #define MSR_MTRRcap 0x000000fe #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b662529072f7..ad72922919ed 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -358,14 +358,14 @@ static void intel_pstate_sysfs_expose_params(void) static int intel_pstate_min_pstate(void) { u64 value; - rdmsrl(0xCE, value); + rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 40) & 0xFF; } static int intel_pstate_max_pstate(void) { u64 value; - rdmsrl(0xCE, value); + rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 8) & 0xFF; } @@ -373,7 +373,7 @@ static int intel_pstate_turbo_pstate(void) { u64 value; int nont, ret; - rdmsrl(0x1AD, value); + rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); nont = intel_pstate_max_pstate(); ret = ((value) & 255); if (ret <= nont) -- cgit v1.2.3 From d17cfb34dc5eb527b98448f3999aac52311d438b Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 22 Mar 2013 21:47:51 +0000 Subject: ARM64: early_printk: Fix check for CONFIG_ARM64_64K_PAGES The 'CONFIG_' prefix is not implicit in IS_ENABLED(). Signed-off-by: Ben Hutchings Cc: Arnd Bergmann Cc: Paul Bolle Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 224b44ab534e..70b8cd4021c4 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) { unsigned long size, mask; - bool page64k = IS_ENABLED(ARM64_64K_PAGES); + bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES); pgd_t *pgd; pud_t *pud; pmd_t *pmd; -- cgit v1.2.3 From f9294e989fa6f2990da155242db03cea1550cac8 Mon Sep 17 00:00:00 2001 From: Stuart Yoder Date: Fri, 22 Mar 2013 09:12:13 +0000 Subject: powerpc: define the conditions where the ePAPR idle hcall can be supported For 32-bit, CONFIG_EPAPR_PARAVIRT pulls in both epapr_paravirt.c and epapr_hcalls.c which contains the 32-bit paravirt idle loop. For 64-bit, the paravirt idle loop is in idle_book3e.S and that source file is included only if CONFIG_PPC_BOOK3E_64 defined. This patch makes that dependency for 64-bit explicit. 
Fixes these build errors: arch/powerpc/kernel/built-in.o: In function `restore_pblist_ptr': ftrace.c:(.toc+0xdc0): undefined reference to `epapr_ev_idle_start' ftrace.c:(.toc+0xdd0): undefined reference to `epapr_ev_idle' Signed-off-by: Stuart Yoder Signed-off-by: Stephen Rothwell --- arch/powerpc/kernel/epapr_paravirt.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index f3eab8594d9f..d44a571e45a7 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -23,8 +23,10 @@ #include #include +#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) extern void epapr_ev_idle(void); extern u32 epapr_ev_idle_start[]; +#endif bool epapr_paravirt_enabled; @@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void) for (i = 0; i < (len / 4); i++) { patch_instruction(epapr_hypercall_start + i, insts[i]); +#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) patch_instruction(epapr_ev_idle_start + i, insts[i]); +#endif } +#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) if (of_get_property(hyper_node, "has-idle", NULL)) ppc_md.power_save = epapr_ev_idle; +#endif epapr_paravirt_enabled = true; -- cgit v1.2.3 From 82d9b0d0c6c4cbd5d0022d7df5e6bf071a9eb6c7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 15 Jan 2013 12:07:40 +0000 Subject: ARM: cache: remove ARMv3 support code This is only used by 740t, which is a v4 core and (by my reading of the datasheet for the CPU) ignores CRm for the cp15 cache flush operation, making the v4 cache implementation in cache-v4.S sufficient for this CPU. Tested with 740T core-tile on Integrator/AP baseboard. Acked-by: Hyok S. Choi Acked-by: Greg Ungerer Signed-off-by: Will Deacon --- arch/arm/include/asm/glue-cache.h | 8 --- arch/arm/mm/Kconfig | 5 +- arch/arm/mm/Makefile | 1 - arch/arm/mm/cache-v3.S | 137 -------------------------------------- arch/arm/mm/proc-arm740.S | 2 +- 5 files changed, 2 insertions(+), 151 deletions(-) delete mode 100644 arch/arm/mm/cache-v3.S (limited to 'arch') diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index cca9f15704ed..ea289e1435e7 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -19,14 +19,6 @@ #undef _CACHE #undef MULTI_CACHE -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v3 -# endif -#endif - #if defined(CONFIG_CPU_CACHE_V4) # ifdef _CACHE # define MULTI_CACHE 1 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 025d17328730..4045c4931a30 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -43,7 +43,7 @@ config CPU_ARM740T depends on !MMU select CPU_32v4T select CPU_ABRT_LV4T - select CPU_CACHE_V3 # although the core is v4t + select CPU_CACHE_V4 select CPU_CP15_MPU select CPU_PABRT_LEGACY help @@ -469,9 +469,6 @@ config CPU_PABRT_V7 bool # The cache model -config CPU_CACHE_V3 - bool - config CPU_CACHE_V4 bool diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 4e333fa2756f..9e51be96f635 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o -obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o diff --git a/arch/arm/mm/cache-v3.S 
b/arch/arm/mm/cache-v3.S deleted file mode 100644 index 8a3fadece8d3..000000000000 --- a/arch/arm/mm/cache-v3.S +++ /dev/null @@ -1,137 +0,0 @@ -/* - * linux/arch/arm/mm/cache-v3.S - * - * Copyright (C) 1997-2002 Russell king - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include -#include -#include -#include "proc-macros.S" - -/* - * flush_icache_all() - * - * Unconditionally clean and invalidate the entire icache. - */ -ENTRY(v3_flush_icache_all) - mov pc, lr -ENDPROC(v3_flush_icache_all) - -/* - * flush_user_cache_all() - * - * Invalidate all cache entries in a particular address - * space. - * - * - mm - mm_struct describing address space - */ -ENTRY(v3_flush_user_cache_all) - /* FALLTHROUGH */ -/* - * flush_kern_cache_all() - * - * Clean and invalidate the entire cache. - */ -ENTRY(v3_flush_kern_cache_all) - /* FALLTHROUGH */ - -/* - * flush_user_cache_range(start, end, flags) - * - * Invalidate a range of cache entries in the specified - * address space. - * - * - start - start address (may not be aligned) - * - end - end address (exclusive, may not be aligned) - * - flags - vma_area_struct flags describing address space - */ -ENTRY(v3_flush_user_cache_range) - mov ip, #0 - mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache - mov pc, lr - -/* - * coherent_kern_range(start, end) - * - * Ensure coherency between the Icache and the Dcache in the - * region described by start. If you have non-snooping - * Harvard caches, you need to implement this function. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_coherent_kern_range) - /* FALLTHROUGH */ - -/* - * coherent_user_range(start, end) - * - * Ensure coherency between the Icache and the Dcache in the - * region described by start. If you have non-snooping - * Harvard caches, you need to implement this function. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_coherent_user_range) - mov r0, #0 - mov pc, lr - -/* - * flush_kern_dcache_area(void *page, size_t size) - * - * Ensure no D cache aliasing occurs, either with itself or - * the I cache - * - * - addr - kernel address - * - size - region size - */ -ENTRY(v3_flush_kern_dcache_area) - /* FALLTHROUGH */ - -/* - * dma_flush_range(start, end) - * - * Clean and invalidate the specified virtual address range. 
- * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_flush_range) - mov r0, #0 - mcr p15, 0, r0, c7, c0, 0 @ flush ID cache - mov pc, lr - -/* - * dma_unmap_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(v3_dma_unmap_area) - teq r2, #DMA_TO_DEVICE - bne v3_dma_flush_range - /* FALLTHROUGH */ - -/* - * dma_map_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(v3_dma_map_area) - mov pc, lr -ENDPROC(v3_dma_unmap_area) -ENDPROC(v3_dma_map_area) - - .globl v3_flush_kern_cache_louis - .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all - - __INITDATA - - @ define struct cpu_cache_fns (see and proc-macros.S) - define_cache_functions v3 diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index dc5de5d53f20..2088234978c4 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -145,5 +145,5 @@ __arm740_proc_info: .long arm740_processor_functions .long 0 .long 0 - .long v3_cache_fns @ cache model + .long v4_cache_fns @ cache model .size __arm740_proc_info, . - __arm740_proc_info -- cgit v1.2.3 From 3ef52f2a00efc5f83ae6d40e55cae96ce275893f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 22 Jan 2013 10:37:51 +0000 Subject: ARM: mm: fix numerous hideous errors in proc-arm740.S The setup code in proc-arm740.S is completely broken and, as far as I can tell, always has been. I was >this< close to ripping it out, when a 740t core-tile materialised in the office, so I've had a crack at fixing things up: - Fix the ram/flash area calculations so that we actually set the condition flags before testing them... - Fix the proc_info structure so that __cpu_io_mmu_flags are defined as 0, placing the __cpu_flush pointer at the correct offset - Re-number the registers used during __arm740_setup so that we don't clobber the machine ID et al - Advertise Thumb support via the hwcaps, since 740T is the only 740 implementation. Acked-by: Hyok S. 
Choi Signed-off-by: Will Deacon --- arch/arm/mm/proc-arm740.S | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 2088234978c4..fde2d2a794cf 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -77,24 +77,27 @@ __arm740_setup: mcr p15, 0, r0, c6, c0 @ set area 0, default ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM - ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 + ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) + mov r4, #10 @ 11 is the minimum (4KB) +1: add r4, r4, #1 @ area size *= 2 + movs r3, r3, lsr #1 bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value + orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit mcr p15, 0, r0, c6, c1 @ set area 1, RAM ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH - ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 + ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) + cmp r3, #0 + moveq r0, #0 + beq 2f + mov r4, #10 @ 11 is the minimum (4KB) +1: add r4, r4, #1 @ area size *= 2 + movs r3, r3, lsr #1 bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value + orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH +2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH mov r0, #0x06 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable @@ -137,10 +140,11 @@ __arm740_proc_info: .long 0x41807400 .long 0xfffffff0 .long 0 + .long 0 b __arm740_setup .long cpu_arch_name .long cpu_elf_name - .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT + .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT .long cpu_arm740_name .long arm740_processor_functions .long 0 -- cgit v1.2.3 From 794fe85da484353dedcb5dc6da14d923d0645fc3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 22 Jan 2013 19:11:38 +0000 Subject: ARM: mm: remove broken condition check for v4 flushing There's no point having a conditional cache flush if we don't know the state of the condition beforehand. This patch makes the cacheflush in v4_flush_user_cache_range unconditional. signed-off-by: will deacon --- arch/arm/mm/cache-v4.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 43e5d77be677..a7ba68f59f0c 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all) ENTRY(v4_flush_user_cache_range) #ifdef CONFIG_CPU_CP15 mov ip, #0 - mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache + mcr p15, 0, ip, c7, c7, 0 @ flush ID cache mov pc, lr #else /* FALLTHROUGH */ -- cgit v1.2.3 From d455bac223d6f9858b29a29272756243ec3d3904 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 22 Jan 2013 11:00:54 +0000 Subject: ARM: modules: don't export cpu_set_pte_ext when !MMU cpu_set_pte_ext is only guaranteed to be defined when CONFIG_MMU, so don't export it to modules otherwise. 
Signed-off-by: Will Deacon --- arch/arm/mm/proc-syms.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 3e6210b4d6d4..054b491ff764 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -17,7 +17,9 @@ #ifndef MULTI_CPU EXPORT_SYMBOL(cpu_dcache_clean_area); +#ifdef CONFIG_MMU EXPORT_SYMBOL(cpu_set_pte_ext); +#endif #else EXPORT_SYMBOL(processor); #endif -- cgit v1.2.3 From 2f3edfd7e27ad4206acbc2ae99c9df5f46353024 Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Tue, 26 Mar 2013 16:46:07 +0800 Subject: ARM: imx: fix sync issue between imx_cpu_die and imx_cpu_kill There is a sync issue with hotplug operation. It's possible that when imx_cpu_kill gets running on primary core, the imx_cpu_die execution on the core which is to be killed hasn't been finished yet. The problem will very likely be hit when running suspend without no_console_suspend setting on kernel cmdline. It uses cpu jumping argument register to sync imx_cpu_die and imx_cpu_kill. The register will be set in imx_cpu_die and imx_cpu_kill will wait for the register being cleared to actually kill the cpu. Signed-off-by: Shawn Guo Cc: --- arch/arm/mach-imx/common.h | 2 ++ arch/arm/mach-imx/hotplug.c | 12 ++++++++++++ arch/arm/mach-imx/src.c | 12 ++++++++++++ 3 files changed, 26 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 5a800bfcec5b..5bf4a97ab241 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *); extern void imx_enable_cpu(int cpu, bool enable); extern void imx_set_cpu_jump(int cpu, void *jump_addr); +extern u32 imx_get_cpu_arg(int cpu); +extern void imx_set_cpu_arg(int cpu, u32 arg); extern void v7_cpu_resume(void); extern u32 *pl310_get_save_ptr(void); #ifdef CONFIG_SMP diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 7bc5fe15dda2..361a253e2b63 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void) void imx_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); + /* + * We use the cpu jumping argument register to sync with + * imx_cpu_kill() which is running on cpu0 and waiting for + * the register being cleared to kill the cpu. + */ + imx_set_cpu_arg(cpu, ~0); cpu_do_idle(); } int imx_cpu_kill(unsigned int cpu) { + unsigned long timeout = jiffies + msecs_to_jiffies(50); + + while (imx_get_cpu_arg(cpu) == 0) + if (time_after(jiffies, timeout)) + return 0; imx_enable_cpu(cpu, false); + imx_set_cpu_arg(cpu, 0); return 1; } diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index e15f1555c59b..09a742f8c7ab 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr) src_base + SRC_GPR1 + cpu * 8); } +u32 imx_get_cpu_arg(int cpu) +{ + cpu = cpu_logical_map(cpu); + return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); +} + +void imx_set_cpu_arg(int cpu, u32 arg) +{ + cpu = cpu_logical_map(cpu); + writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); +} + void imx_src_prepare_restart(void) { u32 val; -- cgit v1.2.3 From ff931c821bab6713a52b768b0cd7ee7e90713b36 Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Thu, 21 Mar 2013 16:34:52 +0530 Subject: ARM: OMAP: clocks: Delay clk inits atleast until slab is initialized clk inits on OMAP happen quite early, even before slab is available. 
The dependency comes from the fact that the timer init code starts to use clocks and hwmod and we need clocks to be initialized by then. There are various problems doing clk inits this early, one is, not being able to do dynamic clk registrations and hence the dependency on clk-private.h. The other is, inability to debug early kernel crashes without enabling DEBUG_LL and earlyprintk. Doing early clk init also exposed another instance of a kernel panic due to a BUG() when CONFIG_DEBUG_SLAB is enabled. [ 0.000000] Kernel BUG at c01174f8 [verbose debug info unavailable] [ 0.000000] Internal error: Oops - BUG: 0 [#1] SMP ARM [ 0.000000] Modules linked in: [ 0.000000] CPU: 0 Not tainted (3.9.0-rc1-12179-g72d48f9 #6) [ 0.000000] PC is at __kmalloc+0x1d4/0x248 [ 0.000000] LR is at __clk_init+0x2e0/0x364 [ 0.000000] pc : [] lr : [] psr: 600001d3 [ 0.000000] sp : c076ff28 ip : c065cefc fp : c0441f54 [ 0.000000] r10: 0000001c r9 : 000080d0 r8 : c076ffd4 [ 0.000000] r7 : c074b578 r6 : c0794d88 r5 : 00000040 r4 : 00000000 [ 0.000000] r3 : 00000000 r2 : c07cac70 r1 : 000080d0 r0 : 0000001c [ 0.000000] Flags: nZCv IRQs off FIQs off Mode SVC_32 ISA ARM Segment kernel [ 0.000000] Control: 10c53c7d Table: 8000404a DAC: 00000017 [ 0.000000] Process swapper (pid: 0, stack limit = 0xc076e240) [ 0.000000] Stack: (0xc076ff28 to 0xc0770000) [ 0.000000] ff20: 22222222 c0794ec8 c06546e8 00000000 00000040 c0794d88 [ 0.000000] ff40: c074b578 c076ffd4 c07951c8 c076e000 00000000 c0441f54 c074b578 c076ffd4 [ 0.000000] ff60: c0793828 00000040 c0794d88 c074b578 c076ffd4 c0776900 c076e000 c07272ac [ 0.000000] ff80: 2f800000 c074c968 c07f93d0 c0719780 c076ffa0 c076ff98 00000000 00000000 [ 0.000000] ffa0: 00000000 00000000 00000000 00000001 c074cd6c c077b1ec 8000406a c0715724 [ 0.000000] ffc0: 00000000 00000000 00000000 00000000 00000000 c074c968 10c53c7d c0776974 [ 0.000000] ffe0: c074cd6c c077b1ec 8000406a 411fc092 00000000 80008074 00000000 00000000 [ 0.000000] [] (__kmalloc+0x1d4/0x248) from [] (__clk_init+0x2e0/0x364) [ 0.000000] [] (__clk_init+0x2e0/0x364) from [] (omap4xxx_clk_init+0xbc/0x140) [ 0.000000] [] (omap4xxx_clk_init+0xbc/0x140) from [] (setup_arch+0x15c/0x284) [ 0.000000] [] (setup_arch+0x15c/0x284) from [] (start_kernel+0x7c/0x334) [ 0.000000] [] (start_kernel+0x7c/0x334) from [<80008074>] (0x80008074) [ 0.000000] Code: e5883004 e1a00006 e28dd00c e8bd8ff0 (e7f001f2) [ 0.000000] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.000000] Kernel panic - not syncing: Attempted to kill the idle task! It was a know issue, that slab allocations would fail when common clock core tries to cache parent pointers for mux clocks on OMAP, and hence a patch 'clk: Allow late cache allocation for clk->parents, commit 7975059d' was added to work this problem around. A BUG() within kmalloc() with CONFIG_DEBUG_SLAB enabled was completely overlooked causing this regression. More details on the issue reported can be found here, http://www.mail-archive.com/linux-omap@vger.kernel.org/msg85932.html With all these issues around clk inits happening way too early, it makes sense to at least move them to a point where dynamic memory allocations are possible. So move them to a point just before the timer code starts using clocks and hwmod. This should at least pave way for clk inits on OMAP moving to dynamic clock registrations instead of using the static macros defined in clk-private.h. The issue with kernel panic while CONFIG_DEBUG_SLAB is enabled was reported by Piotr Haber and Tony Lindgren and this patch fixes the reported issue as well. 
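In outline, the mechanism being added is the one sketched below (condensed from the diff that follows; the timer-side functions are generated by the macros visible in that diff, and the OMAP4 names are used here only as an example): the early init code merely records the SoC's clock initializer, and the timer init invokes it once dynamic memory allocation is possible.

        /* condensed sketch of the deferred clock init added by this patch */
        int (*omap_clk_init)(void);

        void __init omap4430_init_early(void)
        {
                /* ... clockdomain and hwmod setup as before ... */
                omap_clk_init = omap4xxx_clk_init;      /* record, do not run yet */
        }

        void __init omap4_sync32k_timer_init(void)      /* macro-generated */
        {
                if (omap_clk_init)
                        omap_clk_init();                /* slab is available here */
                omap_dmtimer_init();
                /* ... clockevent/clocksource setup that needs clocks ... */
        }
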
Reported-by: Piotr Haber Reported-by: Tony Lindgren Signed-off-by: Rajendra Nayak Acked-by: Santosh Shilimkar Reviewed-by: Mike Turquette Acked-by: Paul Walmsley Cc: stable@vger.kernel.org # v3.8 Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/common.h | 3 +++ arch/arm/mach-omap2/io.c | 18 ++++++++++++------ arch/arm/mach-omap2/timer.c | 4 ++++ 3 files changed, 19 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 40f4a03d728f..d6ba13e1c540 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -293,5 +293,8 @@ extern void omap_reserve(void); struct omap_hwmod; extern int omap_dss_reset(struct omap_hwmod *); +/* SoC specific clock initializer */ +extern int (*omap_clk_init)(void); + #endif /* __ASSEMBLER__ */ #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 2c3fdd65387b..5c445ca1e271 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -54,6 +54,12 @@ #include "prm3xxx.h" #include "prm44xx.h" +/* + * omap_clk_init: points to a function that does the SoC-specific + * clock initializations + */ +int (*omap_clk_init)(void); + /* * The machine specific code may provide the extra mapping besides the * default mapping provided here. @@ -397,7 +403,7 @@ void __init omap2420_init_early(void) omap242x_clockdomains_init(); omap2420_hwmod_init(); omap_hwmod_init_postsetup(); - omap2420_clk_init(); + omap_clk_init = omap2420_clk_init; } void __init omap2420_init_late(void) @@ -427,7 +433,7 @@ void __init omap2430_init_early(void) omap243x_clockdomains_init(); omap2430_hwmod_init(); omap_hwmod_init_postsetup(); - omap2430_clk_init(); + omap_clk_init = omap2430_clk_init; } void __init omap2430_init_late(void) @@ -462,7 +468,7 @@ void __init omap3_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap3xxx_clk_init(); + omap_clk_init = omap3xxx_clk_init; } void __init omap3430_init_early(void) @@ -500,7 +506,7 @@ void __init ti81xx_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap3xxx_clk_init(); + omap_clk_init = omap3xxx_clk_init; } void __init omap3_init_late(void) @@ -568,7 +574,7 @@ void __init am33xx_init_early(void) am33xx_clockdomains_init(); am33xx_hwmod_init(); omap_hwmod_init_postsetup(); - am33xx_clk_init(); + omap_clk_init = am33xx_clk_init; } #endif @@ -593,7 +599,7 @@ void __init omap4430_init_early(void) omap44xx_clockdomains_init(); omap44xx_hwmod_init(); omap_hwmod_init_postsetup(); - omap4xxx_clk_init(); + omap_clk_init = omap4xxx_clk_init; } void __init omap4430_init_late(void) diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 2bdd4cf17a8f..f62b509ed08d 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void) clksrc_nr, clksrc_src) \ void __init omap##name##_gptimer_timer_init(void) \ { \ + if (omap_clk_init) \ + omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ @@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \ clksrc_nr, clksrc_src) \ void __init omap##name##_sync32k_timer_init(void) \ { \ + if (omap_clk_init) \ + omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ /* Enable the use 
of clocksource="gp_timer" kernel parameter */ \ -- cgit v1.2.3 From d3eb2c89e7ba996e8781b22a6e7d0a895ef55630 Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Fri, 22 Mar 2013 10:34:28 -0400 Subject: xen/mmu: Move the setting of pvops.write_cr3 to later phase in bootup. We move the setting of write_cr3 from the early bootup variant (see git commit 0cc9129d75ef8993702d97ab0e49542c15ac6ab9 "x86-64, xen, mmu: Provide an early version of write_cr3.") to a more appropiate location. This new location sets all of the other non-early variants of pvops calls - and most importantly is before the alternative_asm mechanism kicks in. Signed-off-by: Konrad Rzeszutek Wilk --- arch/x86/xen/mmu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index e8e34938c57d..6afbb2ca9a0a 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3) __xen_write_cr3(true, cr3); xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ - - pv_mmu_ops.write_cr3 = &xen_write_cr3; } #endif @@ -2122,6 +2120,7 @@ static void __init xen_post_allocator_init(void) #endif #ifdef CONFIG_X86_64 + pv_mmu_ops.write_cr3 = &xen_write_cr3; SetPagePinned(virt_to_page(level3_user_vsyscall)); #endif xen_mark_init_mm_pinned(); -- cgit v1.2.3 From e73081d9afd4b5fe285d2a585a73d9c6f73ab0dd Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Tue, 26 Mar 2013 10:26:15 +0000 Subject: ARM: ux500: Apply the TCPM and TCDM locations and sizes to dbx5x0 DT This fixes a regression introduced by commit: 05ec260 mfd:db8500-prcmu: update resource passing All DBx5x0 based SoCs have access to two Tightly Coupled Memory (TCM) locations based on the PRCMU itself. One area from program memory (TCPM) and one for data memory (TCDM). The PRCMU needs to know where these are in order to function correctly. However, these are currently passed though platform device resources, which can only be obtained if Device Tree booting isn't in use. Thus we must also support them in DT by supplying them through the PRCMU node. Signed-off-by: Lee Jones Signed-off-by: Linus Walleij --- arch/arm/boot/dts/dbx5x0.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi index 9de93096601a..aaa63d0a8096 100644 --- a/arch/arm/boot/dts/dbx5x0.dtsi +++ b/arch/arm/boot/dts/dbx5x0.dtsi @@ -191,8 +191,8 @@ prcmu: prcmu@80157000 { compatible = "stericsson,db8500-prcmu"; - reg = <0x80157000 0x1000>; - reg-names = "prcmu"; + reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>; + reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm"; interrupts = <0 47 0x4>; #address-cells = <1>; #size-cells = <1>; -- cgit v1.2.3 From 26135256d3e7d4d12c6268623edc1740888c041f Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Wed, 27 Mar 2013 10:47:38 +0000 Subject: ARM: ux500: Stop passing ios_handler() as an MMCI power controlling call-back This fixes a regression introduced during the v3.9 merge window. Now MMCI on/off functionality is using the regulator framework from the MMCI driver, if we provide the ios_handler call-back we essentially duplicate functionality, which causes a large mess and lots of booting issues. 
Signed-off-by: Lee Jones Signed-off-by: Linus Walleij --- arch/arm/mach-ux500/board-mop500-sdi.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 051b62c27102..7f2cb6c5e2c1 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c @@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = { #endif struct mmci_platform_data mop500_sdi0_data = { - .ios_handler = mop500_sdi0_ios_handler, .ocr_mask = MMC_VDD_29_30, .f_max = 50000000, .capabilities = MMC_CAP_4_BIT_DATA | -- cgit v1.2.3 From 265c3c0a64df50c0e8fe55cfe8af8851ed5feca5 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Wed, 27 Mar 2013 14:13:53 +0000 Subject: ARM: ux500: Enable the clock controlling Ethernet on Snowball This fixes a regression introduced by common clk enablement. On some u8500 based boards, the FMSC clock which is usually used for flash, is wired up to the SMSC911x Ethernet driver. However, the SMSC911x doesn't have common clk support yet, rendering it unusable. Prior to the introduction of common clk the FMSC clock was default on; however, common clk disables all clocks by default and insists drivers take responsibility to enable theirs. This fix enables the FMSC clock on Snowball, subsequently turning on the SMSC911x Ethernet chip. It will be removed when the driver is compatible with common clk. Signed-off-by: Lee Jones Signed-off-by: Linus Walleij --- arch/arm/mach-ux500/board-mop500.c | 12 ++++++++++++ arch/arm/mach-ux500/board-mop500.h | 1 + arch/arm/mach-ux500/cpu-db8500.c | 5 +++-- 3 files changed, 16 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index b03457881c4b..87d2d7b38ce9 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev) regulator_put(prox_regulator); } +void mop500_snowball_ethernet_clock_enable(void) +{ + struct clk *clk; + + clk = clk_get_sys("fsmc", NULL); + if (!IS_ERR(clk)) + clk_prepare_enable(clk); +} + static struct cryp_platform_data u8500_cryp1_platform_data = { .mem_to_engine = { .dir = STEDMA40_MEM_TO_PERIPH, @@ -683,6 +693,8 @@ static void __init snowball_init_machine(void) mop500_audio_init(parent); mop500_uart_init(parent); + mop500_snowball_ethernet_clock_enable(); + /* This board has full regulator constraints */ regulator_has_full_constraints(); } diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index eaa605f5d90d..d38951be70df 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h @@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void); void __init snowball_pinmaps_init(void); void __init hrefv60_pinmaps_init(void); void mop500_audio_init(struct device *parent); +void mop500_snowball_ethernet_clock_enable(void); int __init mop500_uib_init(void); void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 19235cf7bbe3..f1a581844372 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -312,9 +312,10 @@ static void __init u8500_init_machine(void) /* Pinmaps must be in place before devices register */ if (of_machine_is_compatible("st-ericsson,mop500")) 
mop500_pinmaps_init(); - else if (of_machine_is_compatible("calaosystems,snowball-a9500")) + else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { snowball_pinmaps_init(); - else if (of_machine_is_compatible("st-ericsson,hrefv60+")) + mop500_snowball_ethernet_clock_enable(); + } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) hrefv60_pinmaps_init(); else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} /* TODO: Add pinmaps for ccu9540 board. */ -- cgit v1.2.3 From 2992714d431976c4b154875bd18ba61bf4df3b93 Mon Sep 17 00:00:00 2001 From: Eric Hutter Date: Mon, 18 Mar 2013 19:48:56 +0100 Subject: ARM: kirkwood: Fix chip-delay for GoFlex Net This fixes "Too few good blocks within range" issues on GoFlex Net by setting chip-delay to 40. The basic problem was discussed at http://forum.doozan.com/read.php?2,7451 Signed-off-by: Eric Hutter Acked-by: Andrew Lunn Cc: # v3.6.x Signed-off-by: Jason Cooper --- arch/arm/boot/dts/kirkwood-goflexnet.dts | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts index bd83b8fc7c83..c3573be7b92c 100644 --- a/arch/arm/boot/dts/kirkwood-goflexnet.dts +++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts @@ -77,6 +77,7 @@ }; nand@3000000 { + chip-delay = <40>; status = "okay"; partition@0 { -- cgit v1.2.3 From 7f23f62fc31c5c97947414c0937a72e08a947a41 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Wed, 20 Mar 2013 16:09:35 +0100 Subject: arm: mvebu: Use local interrupt only for the timer 0 The commit 3a6f08a37 "arm: mvebu: Add support for local interrupt", managed the 28th first interrupts as local interrupt to match the hardware specification. Among these interrupts there are the Gigabits Ethernet ones used by the mvneta driver. Unfortunately the state of the percpu_irq API prevents the driver to use it. Indeed the interrupts have to be freed when the .stop() function is called. As the free_percpu_irq() function don't disable the interrupt line, we have to do it on each CPU before calling this. The function disable_percpu_irq() only disable the percpu on the current CPU and there is no function which allows to disable a percpu irq on a given CPU. Waiting for the extension of the percpu_irq API, this fix allows to use again the mvneta driver. 
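To make the limitation concrete, the sketch below (illustration only, not part of this patch; all names are made up) shows the shape of a driver using the per-CPU IRQ API as it stands: nothing in the .stop() path can disable the line on the other CPUs before free_percpu_irq() is called.

        #include <linux/interrupt.h>

        static irqreturn_t sketch_isr(int irq, void *dev_id)
        {
                return IRQ_HANDLED;
        }

        static int sketch_start(unsigned int irq, void __percpu *port)
        {
                int err;

                err = request_percpu_irq(irq, sketch_isr, "sketch", port);
                if (err)
                        return err;
                enable_percpu_irq(irq, 0);      /* enables the calling CPU only;
                                                 * has to be repeated on every CPU */
                return 0;
        }

        static void sketch_stop(unsigned int irq, void __percpu *port)
        {
                disable_percpu_irq(irq);        /* again, current CPU only */
                free_percpu_irq(irq, port);     /* frees, but never disables the line */
        }
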
Signed-off-by: Gregory CLEMENT Tested-by: Thomas Petazzoni Tested-by: Masami Hiramatsu Signed-off-by: Jason Cooper --- arch/arm/mach-mvebu/irq-armada-370-xp.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c index 274ff58271de..6a9195e10579 100644 --- a/arch/arm/mach-mvebu/irq-armada-370-xp.c +++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c @@ -44,6 +44,8 @@ #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) +#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) + #define ACTIVE_DOORBELLS (8) static DEFINE_RAW_SPINLOCK(irq_controller_lock); @@ -62,7 +64,7 @@ static void armada_370_xp_irq_mask(struct irq_data *d) #ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) writel(hwirq, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); else @@ -79,7 +81,7 @@ static void armada_370_xp_irq_unmask(struct irq_data *d) #ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) writel(hwirq, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); else @@ -147,7 +149,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); irq_set_status_flags(virq, IRQ_LEVEL); - if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { + if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { irq_set_percpu_devid(virq); irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, handle_percpu_devid_irq); -- cgit v1.2.3 From 0d0644ebc63bce3784ebea28d0355071b4e90525 Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Sat, 23 Mar 2013 13:56:58 +0100 Subject: ARM: Kirkwood: fix unused mvsdio gpio pins mvsdio_platform_data allows to pass card detect and write protect gpio numbers to the driver. Some kirkwood boards don't use both pins as they are not connected, and don't set the corresponding value in platform_data. This will leave the unset values in platform_data initialized as 0, which is in fact a valid gpio pin. mvsdio will grab that pin and configure it as gpio, which in turn breaks nand controller as mpp0 also carries nand_io2. This patch fixes the above by initializing unused gpio functions in the platform_data with an invalid (-1) value. 
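For reference, the reason -1 is a safe "not connected" value while an implicit 0 is not: GPIO numbering starts at 0, so a zeroed field looks like a perfectly legal pin. A minimal sketch of the usual test (the helper name is made up, error handling omitted; a driver would call it as sketch_claim_pin(pdata->gpio_card_detect, "sd-cd")):

        #include <linux/gpio.h>

        /* sketch: how a driver would typically claim an optional pin */
        static void sketch_claim_pin(int gpio, const char *label)
        {
                if (!gpio_is_valid(gpio))       /* -1 (or any negative): not wired */
                        return;
                /* 0 passes the check: it is a real pin, mpp0 (nand_io2) */
                gpio_request(gpio, label);
        }
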
Signed-off-by: Sebastian Hesselbarth Reported-by: Soeren Moch Signed-off-by: Jason Cooper --- arch/arm/mach-kirkwood/guruplug-setup.c | 2 ++ arch/arm/mach-kirkwood/openrd-setup.c | 1 + arch/arm/mach-kirkwood/rd88f6281-setup.c | 1 + 3 files changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c index 1c6e736cbbf8..08dd739aa709 100644 --- a/arch/arm/mach-kirkwood/guruplug-setup.c +++ b/arch/arm/mach-kirkwood/guruplug-setup.c @@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = { static struct mvsdio_platform_data guruplug_mvsdio_data = { /* unfortunately the CD signal has not been connected */ + .gpio_card_detect = -1, + .gpio_write_protect = -1, }; static struct gpio_led guruplug_led_pins[] = { diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c index 8ddd69fdc937..6a6eb548307d 100644 --- a/arch/arm/mach-kirkwood/openrd-setup.c +++ b/arch/arm/mach-kirkwood/openrd-setup.c @@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = { static struct mvsdio_platform_data openrd_mvsdio_data = { .gpio_card_detect = 29, /* MPP29 used as SD card detect */ + .gpio_write_protect = -1, }; static unsigned int openrd_mpp_config[] __initdata = { diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index c7d93b48926b..d24223166e06 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c @@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = { static struct mvsdio_platform_data rd88f6281_mvsdio_data = { .gpio_card_detect = 28, + .gpio_write_protect = -1, }; static unsigned int rd88f6281_mpp_config[] __initdata = { -- cgit v1.2.3 From 835f6322c691600da5b7fb526f07b1d21d0c88ef Mon Sep 17 00:00:00 2001 From: Alexander Clouter Date: Tue, 26 Mar 2013 21:44:46 +0000 Subject: arm: orion5x: fix orion5x.dtsi gpio parameters orion5x.dtsi is missing the gpio alias as well as including a typo ('ngpio' instead of 'ngpios') that prevented the orion-gpio driver from loading. Also missing were the interrupt-controller properties. This patches resolves those glitches. Signed-off-by: Alexander Clouter Acked-by: Andrew Lunn Signed-off-by: Jason Cooper --- arch/arm/boot/dts/orion5x.dtsi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi index 8aad00f81ed9..7c2a3263c166 100644 --- a/arch/arm/boot/dts/orion5x.dtsi +++ b/arch/arm/boot/dts/orion5x.dtsi @@ -13,6 +13,9 @@ compatible = "marvell,orion5x"; interrupt-parent = <&intc>; + aliases { + gpio0 = &gpio0; + }; intc: interrupt-controller { compatible = "marvell,orion-intc", "marvell,intc"; interrupt-controller; @@ -32,7 +35,9 @@ #gpio-cells = <2>; gpio-controller; reg = <0x10100 0x40>; - ngpio = <32>; + ngpios = <32>; + interrupt-controller; + #interrupt-cells = <2>; interrupts = <6>, <7>, <8>, <9>; }; -- cgit v1.2.3 From e0656a9cbbd0444aaec493ed8a9420d07c356639 Mon Sep 17 00:00:00 2001 From: Alexander Clouter Date: Tue, 26 Mar 2013 21:44:49 +0000 Subject: arm: orion5x: correct IRQ used in dtsi for mv_cesa The crypto functionality in the orion5x dtsi uses the Ethernet IRQ and so things do not work and there is much grumbling at boot time. The IRQ for the crypto should be 28, and not 22, and that is what this patch corrects. 
Signed-off-by: Alexander Clouter Acked-by: Andrew Lunn Signed-off-by: Jason Cooper --- arch/arm/boot/dts/orion5x.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi index 7c2a3263c166..f7bec3b1ba32 100644 --- a/arch/arm/boot/dts/orion5x.dtsi +++ b/arch/arm/boot/dts/orion5x.dtsi @@ -96,7 +96,7 @@ reg = <0x90000 0x10000>, <0xf2200000 0x800>; reg-names = "regs", "sram"; - interrupts = <22>; + interrupts = <28>; status = "okay"; }; }; -- cgit v1.2.3 From 879d68a445dd7073a8c022fcdd21dc27eca7f192 Mon Sep 17 00:00:00 2001 From: Ryan Press Date: Tue, 26 Mar 2013 16:32:31 -0700 Subject: arm: mvebu: Fix pinctrl for Armada 370 Mirabox SDIO port. The previous configuration used the wrong "clk" pin. Without this change mv_sdio worked because the bootloader would set the pin up, but with a bootloader that does not set the pin, mv_sdio fails to detect any card. I have tested this change using a mwifiex_sdio wireless network adapter over the SDIO interface. Signed-off-by: Ryan Press Signed-off-by: Jason Cooper --- arch/arm/boot/dts/armada-370-mirabox.dts | 2 +- arch/arm/boot/dts/armada-370.dtsi | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts index dd0c57dd9f30..3234875824dc 100644 --- a/arch/arm/boot/dts/armada-370-mirabox.dts +++ b/arch/arm/boot/dts/armada-370-mirabox.dts @@ -54,7 +54,7 @@ }; mvsdio@d00d4000 { - pinctrl-0 = <&sdio_pins2>; + pinctrl-0 = <&sdio_pins3>; pinctrl-names = "default"; status = "okay"; /* diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 8188d138020e..a195debb67d3 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -59,6 +59,12 @@ "mpp50", "mpp51", "mpp52"; marvell,function = "sd0"; }; + + sdio_pins3: sdio-pins3 { + marvell,pins = "mpp48", "mpp49", "mpp50", + "mpp51", "mpp52", "mpp53"; + marvell,function = "sd0"; + }; }; gpio0: gpio@d0018100 { -- cgit v1.2.3 From ed176886b68fbc450ddbe808684a142fcad72b56 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Fri, 29 Mar 2013 11:02:30 -0700 Subject: ia64 idle: delete stale (*idle)() function pointer Commit 3e7fc708eb41 ("ia64 idle: delete pm_idle") in 3.9-rc1 didn't finish the job, leaving an un-initialized reference to (*idle)(). 
[ Haven't seen a crash from this - but seems like we are just being lucky that "idle" is zero so it does get initialized before we jump to randomland - Len ] Reported-by: Lars-Peter Clausen Signed-off-by: Len Brown Signed-off-by: Tony Luck Signed-off-by: Linus Torvalds --- arch/ia64/kernel/process.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index e34f565f595a..6f7dc8b7b35c 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -291,7 +291,6 @@ cpu_idle (void) } if (!need_resched()) { - void (*idle)(void); #ifdef CONFIG_SMP min_xtp(); #endif @@ -299,9 +298,7 @@ cpu_idle (void) if (mark_idle) (*mark_idle)(1); - if (!idle) - idle = default_idle; - (*idle)(); + default_idle(); if (mark_idle) (*mark_idle)(0); #ifdef CONFIG_SMP -- cgit v1.2.3 From ff7f3efb9abf986f4ecd8793a9593f7ca4d6431a Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 29 Mar 2013 13:50:21 -0400 Subject: tile: expect new initramfs name from hypervisor file system The current Tilera boot infrastructure now provides the initramfs to Linux as a Tilera-hypervisor file named "initramfs", rather than "initramfs.cpio.gz", as before. (This makes it reasonable to use other compression techniques than gzip on the file without having to worry about the name causing confusion.) Adapt to use the new name, but also fall back to checking for the old name. Cc'ing to stable so that older kernels will remain compatible with newer Tilera boot infrastructure. Signed-off-by: Chris Metcalf Cc: stable@vger.kernel.org --- arch/tile/kernel/setup.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index d1e15f7b59c6..7a5aa1a7864e 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot) #ifdef CONFIG_BLK_DEV_INITRD -/* - * Note that the kernel can potentially support other compression - * techniques than gz, though we don't do so by default. If we ever - * decide to do so we can either look for other filename extensions, - * or just allow a file with this name to be compressed with an - * arbitrary compressor (somewhat counterintuitively). - */ static int __initdata set_initramfs_file; -static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; +static char __initdata initramfs_file[128] = "initramfs"; static int __init setup_initramfs_file(char *str) { @@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str) early_param("initramfs_file", setup_initramfs_file); /* - * We look for an "initramfs.cpio.gz" file in the hvfs. - * If there is one, we allocate some memory for it and it will be - * unpacked to the initramfs. + * We look for a file called "initramfs" in the hvfs. If there is one, we + * allocate some memory for it and it will be unpacked to the initramfs. + * If it's compressed, the initd code will uncompress it first. */ static void __init load_hv_initrd(void) { @@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void) fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); if (fd == HV_ENOENT) { - if (set_initramfs_file) + if (set_initramfs_file) { pr_warning("No such hvfs initramfs file '%s'\n", initramfs_file); - return; + return; + } else { + /* Try old backwards-compatible name. 
*/ + fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); + if (fd == HV_ENOENT) + return; + } } BUG_ON(fd < 0); stat = hv_fs_fstat(fd); -- cgit v1.2.3 From a3d9052c6296ad3398d3ad649c3c682c3e7ecfa6 Mon Sep 17 00:00:00 2001 From: Mac Lin Date: Mon, 25 Mar 2013 17:23:33 +0800 Subject: ARM: cns3xxx: fix mapping of private memory region Since commit 0536bdf33faf (ARM: move iotable mappings within the vmalloc region), the Cavium CNS3xxx cannot boot anymore. This is caused by the pre-defined iotable mappings is not in the vmalloc region. This patch move the iotable mappings into the vmalloc region, and merge the MPCore private memory region (containing the SCU, the GIC and the TWD) as a single region. Signed-off-by: Mac Lin Signed-off-by: Anton Vorontsov Cc: stable@vger.kernel.org [v3.3+] --- arch/arm/mach-cns3xxx/core.c | 16 +++------------- arch/arm/mach-cns3xxx/include/mach/cns3xxx.h | 16 ++++++++-------- 2 files changed, 11 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index e698f26cc0cb..52e4bb5cf12d 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -22,19 +22,9 @@ static struct map_desc cns3xxx_io_desc[] __initdata = { { - .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), - .length = SZ_4K, + .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT, + .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE), + .length = SZ_8K, .type = MT_DEVICE, }, { .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h index 191c8e57f289..b1021aafa481 100644 --- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h +++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h @@ -94,10 +94,10 @@ #define RTC_INTR_STS_OFFSET 0x34 #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ -#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ +#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */ #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ -#define CNS3XXX_PM_BASE_VIRT 0xFFF08000 +#define CNS3XXX_PM_BASE_VIRT 0xFB001000 #define PM_CLK_GATE_OFFSET 0x00 #define PM_SOFT_RST_OFFSET 0x04 @@ -109,7 +109,7 @@ #define PM_PLL_HM_PD_OFFSET 0x1C #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ -#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 +#define CNS3XXX_UART0_BASE_VIRT 0xFB002000 #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 @@ -130,7 +130,7 @@ #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ -#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 +#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000 #define TIMER1_COUNTER_OFFSET 0x00 #define TIMER1_AUTO_RELOAD_OFFSET 0x04 @@ -227,16 +227,16 @@ * Testchip peripheral and fpga gic regions */ #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ -#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 +#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000 #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ -#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 +#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 
0x100) #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 -#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 +#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600) #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ -#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 +#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000) #define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 -- cgit v1.2.3 From 71196a26d85be2aae2e587dfd3c3190a651133c6 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 27 Mar 2013 02:20:45 +0000 Subject: sparc:remove unused declaration smp_boot_cpus() smp_boot_cpus() was replaced smp_prepare_cpus() long ago, and it no longer needed, so delete it. Signed-off-by: Kefeng Wang Acked-by: Sam Ravnborg Signed-off-by: David S. Miller --- arch/sparc/include/asm/smp_32.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index b73da3c5f10a..68503d3d1b9e 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -46,7 +46,6 @@ void sun4m_init_smp(void); void sun4d_init_smp(void); void smp_callin(void); -void smp_boot_cpus(void); void smp_store_cpu_info(int); void smp_resched_interrupt(void); -- cgit v1.2.3 From 54df2db36c93bbb8c757f172d97c577040de6abb Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 29 Mar 2013 03:44:43 +0000 Subject: sparc/srmmu: clear trailing edge of bitmap properly srmmu_nocache_bitmap is cleared by bit_map_init(). But bit_map_init() attempts to clear by memset(), so it can't clear the trailing edge of bitmap properly on big-endian architecture if the number of bits is not a multiple of BITS_PER_LONG. Actually, the number of bits in srmmu_nocache_bitmap is not always a multiple of BITS_PER_LONG. It is calculated as below: bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; srmmu_nocache_size is decided proportionally by the amount of system RAM and it is rounded to a multiple of PAGE_SIZE. SRMMU_NOCACHE_BITMAP_SHIFT is defined as (PAGE_SHIFT - 4). So it can only be said that bitmap_bits is a multiple of 16. This fixes the problem by using bitmap_clear() instead of memset() in bit_map_init() and this also uses BITS_TO_LONGS() to calculate correct size at bitmap allocation time. Signed-off-by: Akinobu Mita Cc: "David S. Miller" Cc: sparclinux@vger.kernel.org Signed-off-by: David S. 
Miller --- arch/sparc/lib/bitext.c | 6 +----- arch/sparc/mm/srmmu.c | 4 +++- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 48d00e72ce15..8ec4e9c0251a 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c @@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len) void bit_map_init(struct bit_map *t, unsigned long *map, int size) { - - if ((size & 07) != 0) - BUG(); - memset(map, 0, size>>3); - + bitmap_zero(map, size); memset(t, 0, sizeof *t); spin_lock_init(&t->lock); t->map = map; diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c38bb72e3e80..036c2797dece 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void) SRMMU_NOCACHE_ALIGN_MAX, 0UL); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); - srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); + srmmu_nocache_bitmap = + __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long), + SMP_CACHE_BYTES, 0UL); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); -- cgit v1.2.3 From 9a0ac1b6af111f2fa0f2a39e9bbbebb74da7d26a Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 29 Mar 2013 03:44:44 +0000 Subject: sparc/iommu: fix typo s/265KB/256KB/ IOMMU_NPTES is 64K PTEs, so the size is 256KB (= 64K * sizeof(iopte_t)) Signed-off-by: Akinobu Mita Cc: "David S. Miller" Cc: sparclinux@vger.kernel.org Signed-off-by: David S. Miller --- arch/sparc/mm/iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 0f4f7191fbba..28f96f27c768 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -34,7 +34,7 @@ #define IOMMU_RNGE IOMMU_RNGE_256MB #define IOMMU_START 0xF0000000 #define IOMMU_WINSIZE (256*1024*1024U) -#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */ +#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */ #define IOMMU_ORDER 6 /* 4096 * (1<<6) */ /* srmmu.c */ -- cgit v1.2.3 From bf3aece8af91eabea2cfb2cbe528abc394695163 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sat, 30 Mar 2013 00:28:26 +0000 Subject: sparc:cleanup unused code in smp_32.h After genirq and generic clockevent support at sparc32, smp4m_irq_rotate(), prof_multiplier() and prof_counter() are no longer used and should be removed. Find more info from commit 6baa9b20 & 62f08283. Signed-off-by: Kefeng Wang Acked-by: Sam Ravnborg Signed-off-by: David S. Miller --- arch/sparc/include/asm/smp_32.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index 68503d3d1b9e..3c8917f054de 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); void cpu_panic(void); -extern void smp4m_irq_rotate(int cpu); /* * General functions that each host system must provide. 
@@ -106,9 +105,6 @@ extern int hard_smp_processor_id(void); #define raw_smp_processor_id() (current_thread_info()->cpu) -#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier -#define prof_counter(__cpu) cpu_data(__cpu).counter - void smp_setup_cpu_possible_map(void); #endif /* !(__ASSEMBLY__) */ -- cgit v1.2.3 From a2d34dd41212032c03e77bc30c2023725def841a Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sat, 30 Mar 2013 11:44:22 +0000 Subject: sparc: use generic headers Use "generic-y" to add generic headers where possible Signed-off-by: Sam Ravnborg Signed-off-by: David S. Miller --- arch/sparc/include/asm/Kbuild | 4 ++++ arch/sparc/include/asm/cputime.h | 6 ------ arch/sparc/include/asm/emergency-restart.h | 6 ------ arch/sparc/include/asm/mutex.h | 9 --------- arch/sparc/include/asm/serial.h | 6 ------ 5 files changed, 4 insertions(+), 27 deletions(-) delete mode 100644 arch/sparc/include/asm/cputime.h delete mode 100644 arch/sparc/include/asm/emergency-restart.h delete mode 100644 arch/sparc/include/asm/mutex.h delete mode 100644 arch/sparc/include/asm/serial.h (limited to 'arch') diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index e26d430ce2fd..f73884bb286a 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -2,11 +2,15 @@ generic-y += clkdev.h +generic-y += cputime.h generic-y += div64.h +generic-y += emergency-restart.h generic-y += exec.h generic-y += local64.h +generic-y += mutex.h generic-y += irq_regs.h generic-y += local.h generic-y += module.h +generic-y += serial.h generic-y += trace_clock.h generic-y += word-at-a-time.h diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h deleted file mode 100644 index 1a642b81e019..000000000000 --- a/arch/sparc/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_CPUTIME_H -#define __SPARC_CPUTIME_H - -#include - -#endif /* __SPARC_CPUTIME_H */ diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/arch/sparc/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc18..000000000000 --- a/arch/sparc/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. 
(see asm-generic/mutex-xchg.h for details) - */ - -#include diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h deleted file mode 100644 index f90d61c28059..000000000000 --- a/arch/sparc/include/asm/serial.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_SERIAL_H -#define __SPARC_SERIAL_H - -#define BASE_BAUD ( 1843200 / 16 ) - -#endif /* __SPARC_SERIAL_H */ -- cgit v1.2.3 From cbf1ef6b3345d2cc7e62407eec6a6f72a8b1346f Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 31 Mar 2013 07:01:47 +0000 Subject: sparc: use asm-generic version of types.h In sparc headers we use the following pattern: #if defined(__sparc__) && defined(__arch64__) sparc64 specific stuff #else sparc32 specific stuff #endif In types.h this pattern was not followed and here we only checked for __sparc__ for no good reason. It was a left-over from long time ago. I checked other architectures - and most of them do not have any such checks. And all the recently merged versions uses the asm-generic version. Signed-off-by: Sam Ravnborg Signed-off-by: David S. Miller --- arch/sparc/include/asm/Kbuild | 1 + arch/sparc/include/uapi/asm/Kbuild | 1 - arch/sparc/include/uapi/asm/types.h | 17 ----------------- 3 files changed, 1 insertion(+), 18 deletions(-) delete mode 100644 arch/sparc/include/uapi/asm/types.h (limited to 'arch') diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index f73884bb286a..ff18e3cfb6b1 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -13,4 +13,5 @@ generic-y += local.h generic-y += module.h generic-y += serial.h generic-y += trace_clock.h +generic-y += types.h generic-y += word-at-a-time.h diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index ce175aff71b7..b5843ee09fb5 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild @@ -44,7 +44,6 @@ header-y += swab.h header-y += termbits.h header-y += termios.h header-y += traps.h -header-y += types.h header-y += uctx.h header-y += unistd.h header-y += utrap.h diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h deleted file mode 100644 index 383d156cde9c..000000000000 --- a/arch/sparc/include/uapi/asm/types.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _SPARC_TYPES_H -#define _SPARC_TYPES_H -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. - */ - -#if defined(__sparc__) - -#include - -#endif /* defined(__sparc__) */ - -#endif /* defined(_SPARC_TYPES_H) */ -- cgit v1.2.3 From 8f08d6667287241f6818d35e02b223fb5df97cf1 Mon Sep 17 00:00:00 2001 From: Nigel Roberts Date: Mon, 1 Apr 2013 23:03:22 +1100 Subject: ARM: Kirkwood: Fix typo in the definition of ix2-200 rebuild LED In the conversion to pinctrl, an error in the pins for the rebuild LED was introduced. This patch assigns the correct pins and includes the correct name for the LED in kirkwood-iomega_ix2_200.dts. 
Signed-off-by: Nigel Roberts Cc: # v3.8.x Signed-off-by: Jason Cooper --- arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts index 93c3afbef9ee..3694e94f6e99 100644 --- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts +++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts @@ -96,11 +96,11 @@ marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { - marvell,pins = "mpp44"; + marvell,pins = "mpp46"; marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { - marvell,pins = "mpp45"; + marvell,pins = "mpp47"; marvell,function = "gpio"; }; @@ -157,14 +157,14 @@ gpios = <&gpio0 16 0>; linux,default-trigger = "default-on"; }; - health_led1 { + rebuild_led { + label = "status:white:rebuild_led"; + gpios = <&gpio1 4 0>; + }; + health_led { label = "status:red:health_led"; gpios = <&gpio1 5 0>; }; - health_led2 { - label = "status:white:health_led"; - gpios = <&gpio1 4 0>; - }; backup_led { label = "status:blue:backup_led"; gpios = <&gpio0 15 0>; -- cgit v1.2.3 From ea81531de23cf92085e0601178fae920141caa5d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 21 Mar 2013 12:50:39 +0100 Subject: s390/uaccess: fix page table walk When translating user space addresses to kernel addresses the follow_table() function had two bugs: - PROT_NONE mappings could be read accessed via the kernel mapping. That is e.g. putting a filename into a user page, then protecting the page with PROT_NONE and afterwards issuing the "open" syscall with a pointer to the filename would incorrectly succeed. - when walking the page tables it used the pgd/pud/pmd/pte primitives which with dynamic page tables give no indication which real level of page tables is being walked (region2, region3, segment or page table). So in case of an exception the translation exception code passed to __handle_fault() is not necessarily correct. This is not really an issue since __handle_fault() doesn't evaluate the code. Only in case of e.g. a SIGBUS this code gets passed to user space. If user space can do something sane with the value is a different question though. To fix these issues don't use any Linux primitives. Only walk the page tables like the hardware would do it, however we leave quite some checks away since we know that we only have full size page tables and each index is within bounds. In theory this should fix all issues... 
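The PROT_NONE part of this is easy to reproduce from userspace; a minimal sketch along the lines described above (filename and error handling are illustrative only) is:

        /* userspace sketch: open() a filename sitting in a PROT_NONE page */
        #include <fcntl.h>
        #include <string.h>
        #include <sys/mman.h>

        int open_via_protnone(void)
        {
                char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                strcpy(p, "/etc/hostname");     /* any filename */
                mprotect(p, 4096, PROT_NONE);
                /* must fail with EFAULT; it could wrongly succeed before this fix */
                return open(p, O_RDONLY);
        }
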
Signed-off-by: Heiko Carstens Reviewed-by: Gerald Schaefer Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pgtable.h | 1 + arch/s390/lib/uaccess_pt.c | 81 +++++++++++++++++++++++++++-------------- 2 files changed, 55 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 4a2930844d43..1686d8f0f878 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -344,6 +344,7 @@ extern unsigned long MODULES_END; #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ /* Bits in the segment table entry */ +#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 6771fdd89377..466fb3383960 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to, * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address * contains the (negative) exception code. */ -static __always_inline unsigned long follow_table(struct mm_struct *mm, - unsigned long addr, int write) +#ifdef CONFIG_64BIT +static unsigned long follow_table(struct mm_struct *mm, + unsigned long address, int write) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *ptep; + unsigned long *table = (unsigned long *)__pa(mm->pgd); + + switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { + case _ASCE_TYPE_REGION1: + table = table + ((address >> 53) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x39UL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION2: + table = table + ((address >> 42) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3aUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION3: + table = table + ((address >> 31) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3bUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_SEGMENT: + table = table + ((address >> 20) & 0x7ff); + if (unlikely(*table & _SEGMENT_ENTRY_INV)) + return -0x10UL; + if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { + if (write && (*table & _SEGMENT_ENTRY_RO)) + return -0x04UL; + return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + + (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); + } + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + } + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) + return -0x11UL; + if (write && (*table & _PAGE_RO)) + return -0x04UL; + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); +} - pgd = pgd_offset(mm, addr); - if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - return -0x3aUL; +#else /* CONFIG_64BIT */ - pud = pud_offset(pgd, addr); - if (pud_none(*pud) || unlikely(pud_bad(*pud))) - return -0x3bUL; +static unsigned long follow_table(struct mm_struct *mm, + unsigned long address, int write) +{ + unsigned long *table = (unsigned long *)__pa(mm->pgd); - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + table = table + ((address >> 20) & 0x7ff); + if (unlikely(*table & _SEGMENT_ENTRY_INV)) return -0x10UL; - if (pmd_large(*pmd)) { - if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) - return -0x04UL; - return (pmd_val(*pmd) & HPAGE_MASK) + (addr & 
~HPAGE_MASK); - } - if (unlikely(pmd_bad(*pmd))) - return -0x10UL; - - ptep = pte_offset_map(pmd, addr); - if (!pte_present(*ptep)) + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) return -0x11UL; - if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) + if (write && (*table & _PAGE_RO)) return -0x04UL; - - return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); } +#endif /* CONFIG_64BIT */ + static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, size_t n, int write_user) { -- cgit v1.2.3 From 765a0cac566c938821ae26efb4aa53b7502ee82c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sat, 23 Mar 2013 10:29:01 +0100 Subject: s390/mm: provide emtpy check_pgt_cache() function All architectures need to provide a check_pgt_cache() function. The s390 one got lost somewhere. So reintroduce it to prevent future compile errors e.g. if Thomas Gleixner's idle loop rework patches get merged. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pgtable.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 1686d8f0f878..4a5443118cfb 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1532,7 +1532,8 @@ extern int s390_enable_sie(void); /* * No page table caches to initialise */ -#define pgtable_cache_init() do { } while (0) +static inline void pgtable_cache_init(void) { } +static inline void check_pgt_cache(void) { } #include -- cgit v1.2.3 From b22227944b8fe92b19150b4c36421e37979d9a16 Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Fri, 29 Mar 2013 10:20:56 -0400 Subject: xen/mmu: On early bootup, flush the TLB when changing RO->RW bits Xen provided pagetables. Occassionaly on a DL380 G4 the guest would crash quite early with this: (XEN) d244:v0: unhandled page fault (ec=0003) (XEN) Pagetable walk from ffffffff84dc7000: (XEN) L4[0x1ff] = 00000000c3f18067 0000000000001789 (XEN) L3[0x1fe] = 00000000c3f14067 000000000000178d (XEN) L2[0x026] = 00000000dc8b2067 0000000000004def (XEN) L1[0x1c7] = 00100000dc8da067 0000000000004dc7 (XEN) domain_crash_sync called from entry.S (XEN) Domain 244 (vcpu#0) crashed on cpu#3: (XEN) ----[ Xen-4.1.3OVM x86_64 debug=n Not tainted ]---- (XEN) CPU: 3 (XEN) RIP: e033:[] (XEN) RFLAGS: 0000000000000216 EM: 1 CONTEXT: pv guest (XEN) rax: 0000000000000000 rbx: ffffffff81785f88 rcx: 000000000000003f (XEN) rdx: 0000000000000000 rsi: 00000000dc8da063 rdi: ffffffff84dc7000 The offending code shows it to be a loop writting the value zero (%rax) in the %rdi (the L4 provided by Xen) register: 0: 44 00 00 add %r8b,(%rax) 3: 31 c0 xor %eax,%eax 5: b9 40 00 00 00 mov $0x40,%ecx a: 66 0f 1f 84 00 00 00 nopw 0x0(%rax,%rax,1) 11: 00 00 13: ff c9 dec %ecx 15:* 48 89 07 mov %rax,(%rdi) <-- trapping instruction 18: 48 89 47 08 mov %rax,0x8(%rdi) 1c: 48 89 47 10 mov %rax,0x10(%rdi) which fails. xen_setup_kernel_pagetable recycles some of the Xen's page-table entries when it has switched over to its Linux page-tables. Right before try to clear the page, we make a hypercall to change it from _RO to _RW and that works (otherwise we would hit an BUG()). 
And the _RW flag is set for that page: (XEN) L1[0x1c7] = 001000004885f067 0000000000004dc7 The error code is 3, so PFEC_page_present and PFEC_write_access, so page is present (correct), and we tried to write to the page, but a violation occurred. The one theory is that the the page entries in hardware (which are cached) are not up to date with what we just set. Especially as we have just done an CR3 write and flushed the multicalls. This patch does solve the problem by flusing out the TLB page entry after changing it from _RO to _RW and we don't hit this issue anymore. Fixed-Oracle-Bug: 16243091 [ON OCCASIONS VM START GOES INTO 'CRASH' STATE: CLEAR_PAGE+0X12 ON HP DL380 G4] Reported-and-Tested-by: Saar Maoz Signed-off-by: Konrad Rzeszutek Wilk --- arch/x86/xen/mmu.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 6afbb2ca9a0a..a4ea92477e01 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1748,14 +1748,18 @@ static void *m2v(phys_addr_t maddr) } /* Set the page permissions on an identity-mapped pages */ -static void set_page_prot(void *addr, pgprot_t prot) +static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags) { unsigned long pfn = __pa(addr) >> PAGE_SHIFT; pte_t pte = pfn_pte(pfn, prot); - if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) + if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) BUG(); } +static void set_page_prot(void *addr, pgprot_t prot) +{ + return set_page_prot_flags(addr, prot, UVMF_NONE); +} #ifdef CONFIG_X86_32 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) { @@ -1839,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, unsigned long addr) { if (*pt_base == PFN_DOWN(__pa(addr))) { - set_page_prot((void *)addr, PAGE_KERNEL); + set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); clear_page((void *)addr); (*pt_base)++; } if (*pt_end == PFN_DOWN(__pa(addr))) { - set_page_prot((void *)addr, PAGE_KERNEL); + set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); clear_page((void *)addr); (*pt_end)--; } -- cgit v1.2.3 From 8b4b9f27e57584f3d90e0bb84cf800ad81cfe3a1 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Fri, 15 Feb 2013 12:21:43 -0500 Subject: x86: remove the x32 syscall bitmask from syscall_get_nr() Commit fca460f95e928bae373daa8295877b6905bc62b8 simplified the x32 implementation by creating a syscall bitmask, equal to 0x40000000, that could be applied to x32 syscalls such that the masked syscall number would be the same as a x86_64 syscall. While that patch was a nice way to simplify the code, it went a bit too far by adding the mask to syscall_get_nr(); returning the masked syscall numbers can cause confusion with callers that expect syscall numbers matching the x32 ABI, e.g. unmasked syscall numbers. This patch fixes this by simply removing the mask from syscall_get_nr() while preserving the other changes from the original commit. While there are several syscall_get_nr() callers in the kernel, most simply check that the syscall number is greater than zero, in this case this patch will have no effect. Of those remaining callers, they appear to be few, seccomp and ftrace, and from my testing of seccomp without this patch the original commit definitely breaks things; the seccomp filter does not correctly filter the syscalls due to the difference in syscall numbers in the BPF filter and the value from syscall_get_nr(). 
Applying this patch restores the seccomp BPF filter functionality on x32. I've tested this patch with the seccomp BPF filters as well as ftrace and everything looks reasonable to me; needless to say general usage seemed fine as well. Signed-off-by: Paul Moore Link: http://lkml.kernel.org/r/20130215172143.12549.10292.stgit@localhost Cc: Cc: Will Drewry Cc: H. Peter Anvin Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/syscall.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 1ace47b62592..2e188d68397c 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[]; */ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return regs->orig_ax & __SYSCALL_MASK; + return regs->orig_ax; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - regs->ax = regs->orig_ax & __SYSCALL_MASK; + regs->ax = regs->orig_ax; } static inline long syscall_get_error(struct task_struct *task, -- cgit v1.2.3 From 55985e15b80beb339d3e332f00a26003deff14e6 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 2 Apr 2013 01:29:29 +0200 Subject: ARM: mxs: Slow down the I2C clock speed Slow down the I2C clock speed on M28 and SPS1 as it turns out the I2C block in i.MX28 can not operate stable enough with the bus running at 400kHz. Note that the driver used by Freescale runs the bus at 250kHz when 400kHz speed is selected, but the mainline Linux kernel runs the bus at actual 400kHz and that's where it is occasionally unstable. Play safe and run the bus at 100kHz. Signed-off-by: Marek Vasut Cc: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx28-m28evk.dts | 1 - arch/arm/boot/dts/imx28-sps1.dts | 1 - 2 files changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts index 6ce3d17c3a29..fd36e1cca104 100644 --- a/arch/arm/boot/dts/imx28-m28evk.dts +++ b/arch/arm/boot/dts/imx28-m28evk.dts @@ -152,7 +152,6 @@ i2c0: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; - clock-frequency = <400000>; status = "okay"; sgtl5000: codec@0a { diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts index e6cde8aa7fff..6c6a5442800a 100644 --- a/arch/arm/boot/dts/imx28-sps1.dts +++ b/arch/arm/boot/dts/imx28-sps1.dts @@ -70,7 +70,6 @@ i2c0: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; - clock-frequency = <400000>; status = "okay"; rtc: rtc@51 { -- cgit v1.2.3 From 698613b63817f2f6ca79831cd1c37aae67025323 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 3 Apr 2013 16:33:26 +0100 Subject: ARM: iWMMXt: always enable iWMMXt support with PJ4 CPUs Jason Cooper reports these build errors: arch/arm/kernel/built-in.o: In function `iwmmxt_do': /.../arch/arm/kernel/pj4-cp0.c:36: undefined reference to `iwmmxt_task_release' /.../arch/arm/kernel/pj4-cp0.c:40: undefined reference to `iwmmxt_task_switch' make: *** [vmlinux] Error 1 This is caused because the PJ4 code explicitly references the iWMMXt code, but doesn't require it to be built. Fix this by ensuring that iWMMXt is always enabled with PJ4. 
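The explicit references are the thread notifier in arch/arm/kernel/pj4-cp0.c quoted in the link errors above; reconstructed roughly for illustration (see the file itself for the exact code), it always calls into the iWMMXt context-switch helpers, which is why the option can no longer be deselected for PJ4:

        /* arch/arm/kernel/pj4-cp0.c, roughly */
        static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
        {
                struct thread_info *thread = t;

                switch (cmd) {
                case THREAD_NOTIFY_EXIT:
                        iwmmxt_task_release(thread);    /* pj4-cp0.c:36 */
                        break;
                case THREAD_NOTIFY_SWITCH:
                        iwmmxt_task_switch(thread);     /* pj4-cp0.c:40 */
                        break;
                }
                return NOTIFY_DONE;
        }
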
Reported-by: Jason Cooper Signed-off-by: Russell King --- arch/arm/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 13b739469c51..12ea3b3d49a9 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1183,9 +1183,9 @@ config ARM_NR_BANKS default 8 config IWMMXT - bool "Enable iWMMXt support" + bool "Enable iWMMXt support" if !CPU_PJ4 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 - default y if PXA27x || PXA3xx || ARCH_MMP + default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 help Enable support for iWMMXt context switching at run time if running on a CPU that supports it. -- cgit v1.2.3 From 6e7aceeb7c70b9ebad79bcfe91fcf738826e8e6d Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 25 Mar 2013 17:02:48 +0100 Subject: ARM: 7682/1: cache-l2x0: fix masking of RTL revision numbering and set_debug init Commit b8db6b8 (ARM: 7547/4: cache-l2x0: add support for Aurora L2 cache ctrl) moved the masking of the part ID which caused the RTL version to be lost. Commit 6248d06 (ARM: 7545/1: cache-l2x0: make outer_cache_fns a field of l2x0_of_data) changed how .set_debug is initialized. Both commits break commit 74ddcdb (ARM: 7608/1: l2x0: Only set .set_debug on PL310 r3p0 and earlier) which uses the RTL version to conditionally set .set_debug function pointer. Commit b8db6b8 also caused the printed cache ID to be missing the version information. Fix this by reverting how the part number is masked so the RTL version info is maintained. The cache-id-part DT property does not set the RTL bits so masking them should have no effect. Also, re-arrange the order of the function pointer init so the .set_debug function can be overridden. Reported-by: Paolo Pisati Signed-off-by: Rob Herring Cc: Gregory CLEMENT Cc: Yehuda Yitschak Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c2f37390308a..c465faca51b0 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id) int lockregs; int i; - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: lockregs = 8; break; @@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) if (cache_id_part_number_from_dt) cache_id = cache_id_part_number_from_dt; else - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) - & L2X0_CACHE_ID_PART_MASK; + cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); aux &= aux_mask; aux |= aux_val; /* Determine the number of ways */ - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: if (aux & (1 << 16)) ways = 16; @@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = { .flush_all = l2x0_flush_all, .inv_all = l2x0_inv_all, .disable = l2x0_disable, - .set_debug = pl310_set_debug, }, }; @@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) data->save(); of_init = true; - l2x0_init(l2x0_base, aux_val, aux_mask); - memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); + l2x0_init(l2x0_base, aux_val, aux_mask); return 0; } -- cgit v1.2.3 From 93dc68876b608da041fe40ed39424b0fcd5aa2fb Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 26 Mar 2013 23:35:04 +0100 Subject: ARM: 7684/1: errata: Workaround for 
Cortex-A15 erratum 798181 (TLBI/DSB operations) On Cortex-A15 (r0p0..r3p2) the TLBI/DSB are not adequately shooting down all use of the old entries. This patch implements the erratum workaround which consists of: 1. Dummy TLBIMVAIS and DSB on the CPU doing the TLBI operation. 2. Send IPI to the CPUs that are running the same mm (and ASID) as the one being invalidated (or all the online CPUs for global pages). 3. CPU receiving the IPI executes a DMB and CLREX (part of the exception return code already). Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/Kconfig | 10 ++++++ arch/arm/include/asm/highmem.h | 7 ++++ arch/arm/include/asm/mmu_context.h | 2 ++ arch/arm/include/asm/tlbflush.h | 15 +++++++++ arch/arm/kernel/smp_tlb.c | 66 ++++++++++++++++++++++++++++++++++++++ arch/arm/mm/context.c | 3 +- 6 files changed, 102 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 12ea3b3d49a9..1cacda426a0e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420 to deadlock. This workaround puts DSB before executing ISB if an abort may occur on cache maintenance. +config ARM_ERRATA_798181 + bool "ARM errata: TLBI/DSB failure on Cortex-A15" + depends on CPU_V7 && SMP + help + On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not + adequately shooting down all use of the old entries. This + option enables the Linux kernel workaround for this erratum + which sends an IPI to the CPUs that are running the same ASID + as the one being invalidated. + endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 8c5e828f484d..91b99abe7a95 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page); #endif #endif +/* + * Needed to be able to broadcast the TLB invalidation for kmap. + */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif + #ifdef ARCH_NEEDS_KMAP_HIGH_GET extern void *kmap_high_get(struct page *page); #else diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 863a6611323c..a7b85e0d0cc1 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm); void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) +DECLARE_PER_CPU(atomic64_t, active_asids); + #else /* !CONFIG_CPU_HAS_ASID */ #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c8820f0d..9e9c041358ca 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -450,6 +450,21 @@ static inline void local_flush_bp_all(void) isb(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static inline void dummy_flush_tlb_a15_erratum(void) +{ + /* + * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. 
+ */ + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); + dsb(); +} +#else +static inline void dummy_flush_tlb_a15_erratum(void) +{ +} +#endif + /* * flush_pmd_entry * diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index bd0300531399..e82e1d248772 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -12,6 +12,7 @@ #include #include +#include /**********************************************************************/ @@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored) local_flush_bp_all(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static int erratum_a15_798181(void) +{ + unsigned int midr = read_cpuid_id(); + + /* Cortex-A15 r0p0..r3p2 affected */ + if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) + return 0; + return 1; +} +#else +static int erratum_a15_798181(void) +{ + return 0; +} +#endif + +static void ipi_flush_tlb_a15_erratum(void *arg) +{ + dmb(); +} + +static void broadcast_tlb_a15_erratum(void) +{ + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum, + NULL, 1); +} + +static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) +{ + int cpu; + cpumask_t mask = { CPU_BITS_NONE }; + + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + for_each_online_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; + /* + * We only need to send an IPI if the other CPUs are running + * the same ASID as the one being invalidated. There is no + * need for locking around the active_asids check since the + * switch_mm() function has at least one dmb() (as required by + * this workaround) in case a context switch happens on + * another CPU after the condition below. + */ + if (atomic64_read(&mm->context.id) == + atomic64_read(&per_cpu(active_asids, cpu))) + cpumask_set_cpu(cpu, &mask); + } + smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); +} + void flush_tlb_all(void) { if (tlb_ops_need_broadcast()) on_each_cpu(ipi_flush_tlb_all, NULL, 1); else local_flush_tlb_all(); + broadcast_tlb_a15_erratum(); } void flush_tlb_mm(struct mm_struct *mm) @@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm) on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); else local_flush_tlb_mm(mm); + broadcast_tlb_mm_a15_erratum(mm); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) @@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) &ta, 1); } else local_flush_tlb_page(vma, uaddr); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_page(unsigned long kaddr) @@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr) on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); } else local_flush_tlb_kernel_page(kaddr); + broadcast_tlb_a15_erratum(); } void flush_tlb_range(struct vm_area_struct *vma, @@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma, &ta, 1); } else local_flush_tlb_range(vma, start, end); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) @@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); } else local_flush_tlb_kernel_range(start, end); + broadcast_tlb_a15_erratum(); } void flush_bp_all(void) diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a5a4b2bc42ba..2ac37372ef52 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -48,7 +48,7 @@ static 
DEFINE_RAW_SPINLOCK(cpu_asid_lock); static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); -static DEFINE_PER_CPU(atomic64_t, active_asids); +DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; @@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { local_flush_bp_all(); local_flush_tlb_all(); + dummy_flush_tlb_a15_erratum(); } atomic64_set(&per_cpu(active_asids, cpu), asid); -- cgit v1.2.3 From 6f3d90e55660ba42301b5e9c7eed332cc9f70fd7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 28 Mar 2013 11:17:55 +0100 Subject: ARM: 7685/1: delay: use private ticks_per_jiffy field for timer-based delay ops Commit 70264367a243 ("ARM: 7653/2: do not scale loops_per_jiffy when using a constant delay clock") fixed a problem with our timer-based delay loop, where loops_per_jiffy is scaled by cpufreq yet used directly by the timer delay ops. This patch fixes the problem in a more elegant way by keeping a private ticks_per_jiffy field in the delay ops, independent of loops_per_jiffy and therefore not subject to scaling. The loop-based delay continues to use loops_per_jiffy directly, as it should. Acked-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/delay.h | 2 +- arch/arm/kernel/smp.c | 3 --- arch/arm/lib/delay.c | 8 +++++--- 3 files changed, 6 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index 720799fd3a81..dff714d886d5 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,7 +24,7 @@ extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); void (*udelay)(unsigned long); - bool const_clock; + unsigned long ticks_per_jiffy; } arm_delay_ops; #define __delay(n) arm_delay_ops.delay(n) diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 79078edbb9bc..1f2ccccaf009 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb, if (freq->flags & CPUFREQ_CONST_LOOPS) return NOTIFY_OK; - if (arm_delay_ops.const_clock) - return NOTIFY_OK; - if (!per_cpu(l_p_j_ref, cpu)) { per_cpu(l_p_j_ref, cpu) = per_cpu(cpu_data, cpu).loops_per_jiffy; diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 6b93f6a1a3c7..64dbfa57204a 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles) static void __timer_const_udelay(unsigned long xloops) { unsigned long long loops = xloops; - loops *= loops_per_jiffy; + loops *= arm_delay_ops.ticks_per_jiffy; __timer_delay(loops >> UDELAY_SHIFT); } @@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer) pr_info("Switching to timer-based delay loop\n"); delay_timer = timer; lpj_fine = timer->freq / HZ; - loops_per_jiffy = lpj_fine; + + /* cpufreq may scale loops_per_jiffy, so keep a private copy */ + arm_delay_ops.ticks_per_jiffy = lpj_fine; arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; - arm_delay_ops.const_clock = true; + delay_calibrated = true; } else { pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); -- cgit v1.2.3 From 
b21e023ba4003fe4b5c32540e4aee5991c019e92 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 2 Apr 2013 22:11:46 +0100 Subject: ARM: 7689/1: add unwind annotations to ftrace asm Add unwind annotations to the ftrace assembly code so that the function tracer's stacktracing options (func_stack_trace, etc.) work when CONFIG_ARM_UNWIND is enabled. Signed-off-by: Rabin Vincent Signed-off-by: Russell King --- arch/arm/kernel/entry-common.S | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 3248cde504ed..fefd7f971437 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old) */ .macro mcount_enter +/* + * This pad compensates for the push {lr} at the call site. Note that we are + * unable to unwind through a function which does not otherwise save its lr. + */ + UNWIND(.pad #4) stmdb sp!, {r0-r3, lr} + UNWIND(.save {r0-r3, lr}) .endm .macro mcount_get_lr reg @@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old) .endm ENTRY(__gnu_mcount_nc) +UNWIND(.fnstart) #ifdef CONFIG_DYNAMIC_FTRACE mov ip, lr ldmia sp!, {lr} @@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc) #else __mcount #endif +UNWIND(.fnend) ENDPROC(__gnu_mcount_nc) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(ftrace_caller) +UNWIND(.fnstart) __ftrace_caller +UNWIND(.fnend) ENDPROC(ftrace_caller) #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) +UNWIND(.fnstart) __ftrace_graph_caller +UNWIND(.fnend) ENDPROC(ftrace_graph_caller) #endif -- cgit v1.2.3 From 4e1db26a0b42e2b6e27c05d68adcc01709c2eed2 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Wed, 3 Apr 2013 12:24:45 +0100 Subject: ARM: 7690/1: mm: fix CONFIG_LPAE typos CONFIG_LPAE doesn't exist: the correct option is CONFIG_ARM_LPAE, so fix up the two typos under arch/arm/. The fix to head.S is slightly scary, but this is just for setting up an early io-mapping for the serial port when running on a big-endian, LPAE system. Since these systems don't exist in the wild (at least, I have no access to one outside of kvmtool, which doesn't provide a serial port suitable for earlyprintk), then we can revisit the code later if it causes any problems. 
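Such typos slip through because the preprocessor treats an unknown CONFIG_ symbol as simply undefined, so the guarded code silently drops out instead of breaking the build. A minimal standalone illustration (not kernel code; the macro value is assumed purely for the example):

#include <stdio.h>

#define CONFIG_ARM_LPAE 1       /* the real option, as the kconfig machinery would define it */

int main(void)
{
#ifdef CONFIG_LPAE              /* misspelled: never defined, so this block is silently dead */
	puts("guard compiled in via the misspelled macro");
#endif
#ifdef CONFIG_ARM_LPAE          /* correct spelling: block is actually built */
	puts("guard compiled in via the correct macro");
#endif
	return 0;
}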
Signed-off-by: Paul Bolle Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/head.S | 2 +- arch/arm/kernel/setup.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index e0eb9a1cae77..8bac553fe213 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -267,7 +267,7 @@ __create_page_tables: addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] -#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) +#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) sub r4, r4, #4 @ Fixup page table pointer @ for 64-bit descriptors #endif diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index f3ac13f69b7a..d343a6c3a6d1 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -544,7 +544,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size) size -= start & ~PAGE_MASK; bank->start = PAGE_ALIGN(start); -#ifndef CONFIG_LPAE +#ifndef CONFIG_ARM_LPAE if (bank->start + size < bank->start) { printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " "32-bit physical address space\n", (long long)start); -- cgit v1.2.3 From ad8c396936e328f5344e1881afde9e28d5f2045f Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 2 Apr 2013 22:59:29 +0000 Subject: MIPS: Unbreak function tracer for 64-bit kernel. Commit 58b69401c797 [MIPS: Function tracer: Fix broken function tracing] completely broke the function tracer for 64-bit kernels. The symptom is a system hang very early in the boot process. The fix: Remove/fix $sp adjustments for 64-bit case. Signed-off-by: David Daney Cc: linux-mips@linux-mips.org Cc: Al Cooper Cc: viric@viric.name Cc: stable@vger.kernel.org # 3.8.x Signed-off-by: Ralf Baechle --- arch/mips/kernel/mcount.S | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 165867673357..33d067148e61 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -46,10 +46,9 @@ PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) -#else - PTR_ADDIU sp, PT_SIZE #endif -.endm + PTR_ADDIU sp, PT_SIZE + .endm .macro RETURN_BACK jr ra @@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: b ftrace_stub - addiu sp,sp,8 +#ifdef CONFIG_32BIT + addiu sp,sp,8 +#else + nop +#endif /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ lw t1, function_trace_stop -- cgit v1.2.3 From 143f0f659986f921731ab340d9415be479762c1a Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Thu, 4 Apr 2013 12:47:01 +0000 Subject: MIPS: Alchemy: Fix typo "CONFIG_DEBUG_PCI" Commit 7517de348663b08a808aff44b5300e817157a568 ("MIPS: Alchemy: Redo PCI as platform driver") added a reference to CONFIG_DEBUG_PCI. Change it to CONFIG_PCI_DEBUG, as that is a valid Kconfig macro. Also add a newline to a debugging printk that this fix enables. Signed-off-by: Paul Bolle Cc: Sergei Shtylyov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ralf Baechle --- arch/mips/pci/pci-alchemy.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 38a80c83fd67..d1faece21b6a 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -19,7 +19,7 @@ #include #include -#ifdef CONFIG_DEBUG_PCI +#ifdef CONFIG_PCI_DEBUG #define DBG(x...) printk(KERN_DEBUG x) #else #define DBG(x...) 
do {} while (0) @@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus, if (status & (1 << 29)) { *data = 0xffffffff; error = -1; - DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", + DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n", access_type, bus->number, device); } else if ((status >> 28) & 0xf) { DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", -- cgit v1.2.3 From aaa9fad32fa80878e6935c5668098c9b55b31458 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Mon, 25 Mar 2013 09:39:54 +0000 Subject: MIPS: Kconfig: Rename SNIPROM too CONFIG_SNIPROM was renamed to CONFIG_FW_SNIPROM in v3.8. Let's rename SNIPROM itself too. Signed-off-by: Paul Bolle Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: Thomas Bogendoerfer Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cd2e21ff562a..b2df47604759 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -657,7 +657,7 @@ config SNI_RM bool "SNI RM200/300/400" select FW_ARC if CPU_LITTLE_ENDIAN select FW_ARC32 if CPU_LITTLE_ENDIAN - select SNIPROM if CPU_BIG_ENDIAN + select FW_SNIPROM if CPU_BIG_ENDIAN select ARCH_MAY_HAVE_PC_FDC select BOOT_ELF32 select CEVT_R4K @@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION config FW_ARC32 bool -config SNIPROM +config FW_SNIPROM bool -- cgit v1.2.3 From ed1197f9317c960a199f491779e056c572506dd3 Mon Sep 17 00:00:00 2001 From: EunBong Song Date: Sun, 24 Mar 2013 22:18:35 +0000 Subject: MIPS: Fix build error cavium-octeon without CONFIG_SMP Signed-off-by: EunBong Song Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ralf Baechle --- arch/mips/cavium-octeon/setup.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index c594a3d4f743..b0baa299f899 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image) static void octeon_generic_shutdown(void) { - int cpu, i; + int i; +#ifdef CONFIG_SMP + int cpu; +#endif struct cvmx_bootmem_desc *bootmem_desc; void *named_block_array_ptr; -- cgit v1.2.3 From adb3789264c4e8567113a0e764ad30ce6e8737f3 Mon Sep 17 00:00:00 2001 From: Deng-Cheng Zhu Date: Mon, 1 Apr 2013 18:14:28 +0000 Subject: MIPS: Fix ISA level which causes secondary cache init bypassing and more Commit a96102be70 introduced set_isa(), where compatible ISA info is also set aside from the one that gets passed in. It means, for example, 1004K will have MIPS_CPU_ISA_M32R2/M32R1/II/I flags. This makes checks like the following inappropriate: if (c->isa_level == MIPS_CPU_ISA_M32R1 || c->isa_level == MIPS_CPU_ISA_M32R2 || c->isa_level == MIPS_CPU_ISA_M64R1 || c->isa_level == MIPS_CPU_ISA_M64R2) This patch fixes it. Signed-off-by: Deng-Cheng Zhu Cc: Steven J.
Hill Cc: linux-mips@linux-mips.org Signed-off-by: Ralf Baechle --- arch/mips/kernel/cpu-probe.c | 6 ++---- arch/mips/kernel/traps.c | 2 +- arch/mips/mm/c-r4k.c | 6 ++---- arch/mips/mm/sc-mips.c | 6 ++---- 4 files changed, 7 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6bfccc227a95..ed80c3844345 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1226,10 +1226,8 @@ __cpuinit void cpu_probe(void) if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a200b5bdbb87..c3abb88170fc 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif - if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) + if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) status_set |= ST0_XX; if (cpu_has_dsp) status_set |= ST0_MX; diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index ecca559b8d7b..2078915eacb9 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void) return; default: - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { #ifdef CONFIG_MIPS_CPU_SCACHE if (mips_sc_init ()) { scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 93d937b4b1ba..df96da7e939b 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void) c->scache.flags |= MIPS_CACHE_NOT_PRESENT; /* Ignore anything but MIPSxx processors */ - if (c->isa_level != MIPS_CPU_ISA_M32R1 && - c->isa_level != MIPS_CPU_ISA_M32R2 && - c->isa_level != MIPS_CPU_ISA_M64R1 && - c->isa_level != MIPS_CPU_ISA_M64R2) + if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) return 0; /* Does this MIPS32/MIPS64 CPU have a config2 register? */ -- cgit v1.2.3 From 80fa8181aabeb10389c8810f97e2737eb084ce8f Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Mon, 25 Mar 2013 13:43:14 +0100 Subject: MIPS: Delete definition of SA_RESTORER. SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever supported its use and no libc was using it, so the entire sa-restorer functionality was removed with lmo commit 39bffc12c3580ab [Zap sa_restorer.] for 2.5.48 retaining only the SA_RESTORER definition as a reminder to avoid accidental reuse of the mask bit. Upstream cdef9602fbf1871a43f0f1b5cea10dd0f275167d [signal: always clear sa_restorer on execve] adds code that assumes sa_sigaction has an sa_restorer field, if SA_RESTORER is defined which would break MIPS. So remove the SA_RESTORER definition before the v3.8.4 merge. 
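The breakage being avoided is generic signal code keying off #ifdef SA_RESTORER to touch a sa_restorer member that the MIPS o32 sigaction does not provide. The standalone sketch below uses a local stand-in structure and flag value, not the kernel's uapi definitions, just to show the pattern:

#include <stdio.h>

#define SA_RESTORER_FLAG 0x04000000UL       /* stand-in for an arch-defined SA_RESTORER */

struct fake_sigaction {
	void (*sa_handler)(int);
	unsigned long sa_flags;
	void (*sa_restorer)(void);          /* generic code assumes this exists if the flag does */
};

static void clear_restorer_on_exec(struct fake_sigaction *sa)
{
#ifdef SA_RESTORER_FLAG
	/* Only compiles if the structure really has the field; an arch that
	 * defines the flag without the field breaks right here. */
	sa->sa_restorer = 0;
	sa->sa_flags &= ~SA_RESTORER_FLAG;
#endif
}

int main(void)
{
	struct fake_sigaction sa = { .sa_flags = SA_RESTORER_FLAG };

	clear_restorer_on_exec(&sa);
	printf("sa_flags after exec-time clearing: %#lx\n", sa.sa_flags);
	return 0;
}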
Signed-off-by: Ralf Baechle (cherry picked from commit 17da8d63add23830892ac4dc2cbb3b5d4ffb79a8) --- arch/mips/include/uapi/asm/signal.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index d6b18b4d0f3a..addb9f556b71 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h @@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ * * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single * Unix names RESETHAND and NODEFER respectively. + * + * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever + * supported its use and no libc was using it, so the entire sa-restorer + * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 + * retaining only the SA_RESTORER definition as a reminder to avoid + * accidental reuse of the mask bit. */ #define SA_ONSTACK 0x08000000 #define SA_RESETHAND 0x80000000 @@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_RESTORER 0x04000000 /* Only for o32 */ - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -- cgit v1.2.3 From 918708245e92941df16a634dc201b407d12bcd91 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 3 Apr 2013 15:47:33 +0100 Subject: x86: Fix rebuild with EFI_STUB enabled eboot.o and efi_stub_$(BITS).o didn't get added to "targets", and hence their .cmd files don't get included by the build machinery, leading to the files always getting rebuilt. Rather than adding the two files individually, take the opportunity and add $(VMLINUX_OBJS) to "targets" instead, thus allowing the assignment at the top of the file to be shrunk quite a bit. At the same time, remove a pointless flags override line - the variable assigned to was misspelled anyway, and the options added are meaningless for assembly sources. [ hpa: the patch is not minimal, but I am taking it for -urgent anyway since the excess impact of the patch seems to be small enough. ] Signed-off-by: Jan Beulich Link: http://lkml.kernel.org/r/515C5D2502000078000CA6AD@nat28.tlf.novell.com Cc: Matthew Garrett Cc: Matt Fleming Signed-off-by: H. 
Peter Anvin --- arch/x86/boot/compressed/Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 8a84501acb1b..5ef205c5f37b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/piggy.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone -$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone ifeq ($(CONFIG_EFI_STUB), y) VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o @@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -targets += vmlinux.bin.all vmlinux.relocs +targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs CMD_RELOCS = arch/x86/tools/relocs quiet_cmd_relocs = RELOCS $@ -- cgit v1.2.3 From 8f964525a121f2ff2df948dac908dcc65be21b5b Mon Sep 17 00:00:00 2001 From: Andrew Honig Date: Fri, 29 Mar 2013 09:35:21 -0700 Subject: KVM: Allow cross page reads and writes from cached translations. This patch adds support for kvm_gfn_to_hva_cache_init functions for reads and writes that will cross a page. If the range falls within the same memslot, then this will be a fast operation. If the range is split between two memslots, then the slower kvm_read_guest and kvm_write_guest are used. Tested: Test against kvm_clock unit tests. Signed-off-by: Andrew Honig Signed-off-by: Gleb Natapov --- arch/x86/kvm/lapic.c | 2 +- arch/x86/kvm/x86.c | 13 ++++++------- include/linux/kvm_host.h | 2 +- include/linux/kvm_types.h | 1 + virt/kvm/kvm_main.c | 47 +++++++++++++++++++++++++++++++++++++---------- 5 files changed, 46 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 02b51dd4e4ad..f77df1c5de6e 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) if (!pv_eoi_enabled(vcpu)) return 0; return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr); + addr, sizeof(u8)); } void kvm_lapic_init(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f19ac0aca60d..e1721324c271 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 0; } - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, + sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) gpa_offset = data & ~(PAGE_MASK | 1); - /* Check that the address is 32-byte aligned. 
*/ - if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)) - break; - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, - &vcpu->arch.pv_time, data & ~1ULL)) + &vcpu->arch.pv_time, data & ~1ULL, + sizeof(struct pvclock_vcpu_time_info))) vcpu->arch.pv_time_enabled = false; else vcpu->arch.pv_time_enabled = true; @@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, - data & KVM_STEAL_VALID_BITS)) + data & KVM_STEAL_VALID_BITS, + sizeof(struct kvm_steal_time))) return 1; vcpu->arch.st.msr_val = data; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cad77fe09d77..c13958251927 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa); + gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index fa7cc7244cbd..b0bcce0ddc95 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -71,6 +71,7 @@ struct gfn_to_hva_cache { u64 generation; gpa_t gpa; unsigned long hva; + unsigned long len; struct kvm_memory_slot *memslot; }; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index adc68feb5c5a..f18013f09e68 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, } int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa) + gpa_t gpa, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); int offset = offset_in_page(gpa); - gfn_t gfn = gpa >> PAGE_SHIFT; + gfn_t start_gfn = gpa >> PAGE_SHIFT; + gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; + gfn_t nr_pages_needed = end_gfn - start_gfn + 1; + gfn_t nr_pages_avail; ghc->gpa = gpa; ghc->generation = slots->generation; - ghc->memslot = gfn_to_memslot(kvm, gfn); - ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); - if (!kvm_is_error_hva(ghc->hva)) + ghc->len = len; + ghc->memslot = gfn_to_memslot(kvm, start_gfn); + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); + if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { ghc->hva += offset; - else - return -EFAULT; - + } else { + /* + * If the requested region crosses two memslots, we still + * verify that the entire region is valid here. + */ + while (start_gfn <= end_gfn) { + ghc->memslot = gfn_to_memslot(kvm, start_gfn); + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, + &nr_pages_avail); + if (kvm_is_error_hva(ghc->hva)) + return -EFAULT; + start_gfn += nr_pages_avail; + } + /* Use the slow path for cross page reads and writes. 
*/ + ghc->memslot = NULL; + } return 0; } EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); @@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, struct kvm_memslots *slots = kvm_memslots(kvm); int r; + BUG_ON(len > ghc->len); + if (slots->generation != ghc->generation) - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); + + if (unlikely(!ghc->memslot)) + return kvm_write_guest(kvm, ghc->gpa, data, len); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; @@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, struct kvm_memslots *slots = kvm_memslots(kvm); int r; + BUG_ON(len > ghc->len); + if (slots->generation != ghc->generation) - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); + + if (unlikely(!ghc->memslot)) + return kvm_read_guest(kvm, ghc->gpa, data, len); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; -- cgit v1.2.3 From aa8b4be3ac049c8b1df2a87e4d1d902ccfc1f7a9 Mon Sep 17 00:00:00 2001 From: Jay Estabrook Date: Sun, 7 Apr 2013 21:36:09 +1200 Subject: alpha: Add irongate_io to PCI bus resources Fixes a NULL pointer dereference at boot on UP1500. Cc: stable@vger.kernel.org Reviewed-and-Tested-by: Matt Turner Signed-off-by: Jay Estabrook Signed-off-by: Matt Turner Signed-off-by: Michael Cree Signed-off-by: Linus Torvalds --- arch/alpha/kernel/sys_nautilus.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 4d4c046f708d..1383f8601a93 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); +static struct resource irongate_io = { + .name = "Irongate PCI IO", + .flags = IORESOURCE_IO, +}; static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, @@ -209,6 +213,7 @@ nautilus_init_pci(void) irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; + bus->resource[0] = &irongate_io; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); -- cgit v1.2.3 From cd8d2331756751b6aeb855a3c9cb0a92fbd9c725 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Sun, 7 Apr 2013 21:36:10 +1200 Subject: alpha: makefile: don't enforce small data model for kernel builds Due to all of the goodness being packed into today's kernels, the resulting image isn't as slim as it once was. In light of this, don't pass -msmall-data to gcc, which otherwise results in link failures due to impossible relocations when compiling anything but the most trivial configurations. 
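For a rough feel of why a small data model stops scaling, here is a tiny userspace sketch; the 64KB window is assumed here as the reach of a signed 16-bit offset from a global pointer, rather than quoted from the Alpha ABI:

#include <stdio.h>

static char kernel_static_data[3 << 16];        /* pretend 192KB of .data/.bss */

int main(void)
{
	const unsigned long window = 1UL << 16;     /* assumed 64KB gp-relative window */

	printf("static data %zu bytes vs. %lu byte window: %s\n",
	       sizeof(kernel_static_data), window,
	       sizeof(kernel_static_data) > window
	       ? "relocations cannot all reach, the link would fail"
	       : "fits the small data model");
	return 0;
}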
Cc: Richard Henderson Cc: Ivan Kokshaysky Reviewed-by: Matt Turner Tested-by: Thorsten Kranzkowski Signed-off-by: Will Deacon Signed-off-by: Michael Cree Signed-off-by: Linus Torvalds --- arch/alpha/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 4759fe751aa1..2cc3cc519c54 100644 --- a/arch/alpha/Makefile +++ b/arch/alpha/Makefile @@ -12,7 +12,7 @@ NM := $(NM) -B LDFLAGS_vmlinux := -static -N #-relax CHECKFLAGS += -D__alpha__ -m64 -cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data +cflags-y := -pipe -mno-fp-regs -ffixed-8 cflags-y += $(call cc-option, -fno-jump-tables) cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4 -- cgit v1.2.3 From e20800fd5cec2a75639a32e956b1cdc023cb87ce Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Sun, 7 Apr 2013 21:36:11 +1200 Subject: alpha: irq: run all handlers with interrupts disabled Linux has expected that interrupt handlers are executed with local interrupts disabled for a while now, so ensure that this is the case on Alpha even for non-device interrupts such as IPIs. Without this patch, secondary boot results in the following backtrace: warning: at kernel/softirq.c:139 __local_bh_enable+0xb8/0xd0() trace: __local_bh_enable+0xb8/0xd0 irq_enter+0x74/0xa0 scheduler_ipi+0x50/0x100 handle_ipi+0x84/0x260 do_entint+0x1ac/0x2e0 irq_exit+0x60/0xa0 handle_irq+0x98/0x100 do_entint+0x2c8/0x2e0 ret_from_sys_call+0x0/0x10 load_balance+0x3e4/0x870 cpu_idle+0x24/0x80 rcu_eqs_enter_common.isra.38+0x0/0x120 cpu_idle+0x40/0x80 rest_init+0xc0/0xe0 _stext+0x1c/0x20 A similar dump occurs if you try to reboot using magic-sysrq. Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Reviewed-by: Matt Turner Signed-off-by: Will Deacon Signed-off-by: Michael Cree Signed-off-by: Linus Torvalds --- arch/alpha/kernel/irq.c | 7 ------- arch/alpha/kernel/irq_alpha.c | 9 ++++++++- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2872accd2215..7b2be251c30f 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -117,13 +117,6 @@ handle_irq(int irq) return; } - /* - * From here we must proceed with IPL_MAX. Note that we do not - * explicitly enable interrupts afterwards - some MILO PALcode - * (namely LX164 one) seems to have severe problems with RTI - * at IPL 0. - */ - local_irq_disable(); irq_enter(); generic_handle_irq_desc(irq, desc); irq_exit(); diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 772ddfdb71a8..1216dfb4fcc5 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr, struct pt_regs *regs) { struct pt_regs *old_regs; + + /* + * Disable interrupts during IRQ handling. + * Note that there is no matching local_irq_enable() due to + * severe problems with RTI at IPL0 and some MILO PALcode + * (namely LX164). 
+ */ + local_irq_disable(); switch (type) { case 0: #ifdef CONFIG_SMP @@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector, { long cpu; - local_irq_disable(); smp_percpu_timer_interrupt(regs); cpu = smp_processor_id(); if (cpu != boot_cpuid) { -- cgit v1.2.3 From e74e25929ce4d4f5f5f4bae585b214bacef6960b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Sun, 7 Apr 2013 21:36:12 +1200 Subject: alpha: irq: remove deprecated use of IRQF_DISABLED Interrupt handlers are always invoked with interrupts disabled, so remove all uses of the deprecated IRQF_DISABLED flag. Cc: Richard Henderson Cc: Ivan Kokshaysky Reviewed-by: Matt Turner Signed-off-by: Will Deacon Signed-off-by: Michael Cree Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/floppy.h | 2 +- arch/alpha/kernel/irq_alpha.c | 1 - arch/alpha/kernel/sys_titan.c | 14 +++++++------- 3 files changed, 8 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h index 46cefbd50e73..bae97eb19d26 100644 --- a/arch/alpha/include/asm/floppy.h +++ b/arch/alpha/include/asm/floppy.h @@ -26,7 +26,7 @@ #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_cacheflush(addr,size) /* nothing */ #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ - IRQF_DISABLED, "floppy", NULL) + 0, "floppy", NULL) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #ifdef CONFIG_PCI diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 1216dfb4fcc5..f433fc11877a 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -229,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED, .name = "timer", }; diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 5cf4a481b8c5..a53cf03f49d5 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -280,15 +280,15 @@ titan_late_init(void) * all reported to the kernel as machine checks, so the handler * is a nop so it can be called to count the individual events. */ - titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(63+16, titan_intr_nop, 0, "CChip Error", NULL); - titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(62+16, titan_intr_nop, 0, "PChip 0 H_Error", NULL); - titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(61+16, titan_intr_nop, 0, "PChip 1 H_Error", NULL); - titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(60+16, titan_intr_nop, 0, "PChip 0 C_Error", NULL); - titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(59+16, titan_intr_nop, 0, "PChip 1 C_Error", NULL); /* @@ -348,9 +348,9 @@ privateer_init_pci(void) * Hook a couple of extra err interrupts that the * common titan code won't. */ - titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(53+16, titan_intr_nop, 0, "NMI", NULL); - titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(50+16, titan_intr_nop, 0, "Temperature Warning", NULL); /* -- cgit v1.2.3 From 9fb2640159f9d4f5a2a9d60e490482d4cbecafdb Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Fri, 5 Apr 2013 10:41:40 +0000 Subject: powerpc: pSeries_lpar_hpte_remove fails from Adjunct partition being performed before the ANDCOND test Some versions of pHyp will perform the adjunct partition test before the ANDCOND test. 
The result of this is that H_RESOURCE can be returned and cause the BUG_ON condition to occur. The HPTE is not removed. So add a check for H_RESOURCE, it is ok if this HPTE is not removed as pSeries_lpar_hpte_remove is looking for an HPTE to remove and not a specific HPTE to remove. So it is ok to just move on to the next slot and try again. Cc: stable@vger.kernel.org Signed-off-by: Michael Wolf Signed-off-by: Stephen Rothwell --- arch/powerpc/platforms/pseries/lpar.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0da39fed355a..299731e9036b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; - BUG_ON(lpar_rc != H_NOT_FOUND); + + /* + * The test for adjunct partition is performed before the + * ANDCOND test. H_RESOURCE may be returned, so we need to + * check for that as well. + */ + BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); slot_offset++; slot_offset &= 0x7; -- cgit v1.2.3 From b6c7aabd923a17af993c5a5d5d7995f0b27c000a Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Apr 2013 11:44:57 +0100 Subject: ARM: Do 15e0d9e37c (ARM: pm: let platforms select cpu_suspend support) properly Let's do the changes properly and fix the same problem everywhere, not just for one case. Cc: # kernels containing 15e0d9e37c or equivalent Signed-off-by: Russell King --- arch/arm/mm/proc-arm920.S | 2 +- arch/arm/mm/proc-arm926.S | 2 +- arch/arm/mm/proc-mohawk.S | 2 +- arch/arm/mm/proc-sa1100.S | 2 +- arch/arm/mm/proc-v6.S | 2 +- arch/arm/mm/proc-xsc3.S | 2 +- arch/arm/mm/proc-xscale.S | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2c3b9421ab5e..2556cf1c2da1 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm920_suspend_size .equ cpu_arm920_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm920_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index f1803f7e2972..344c8a548cc0 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 82f9cdc751d6..0b60dd3d742a 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext) .globl cpu_mohawk_suspend_size .equ cpu_mohawk_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_mohawk_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 3aa0da11fd84..d92dfd081429 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -172,7 
+172,7 @@ ENTRY(cpu_sa1100_set_pte_ext) .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index bcaaa8de9325..5c07ee4fe3eb 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext) /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index eb93d6487f35..e8efd83b6f25 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext) .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 25510361aa18..e766f889bfd6 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext) .globl cpu_xscale_suspend_size .equ cpu_xscale_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xscale_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode -- cgit v1.2.3 From 79e5f05edcbf85825d19eb8a425cd6c36c6c66f1 Mon Sep 17 00:00:00 2001 From: Christian Ruppert Date: Mon, 8 Apr 2013 13:05:30 +0530 Subject: ARC: Add implicit compiler barrier to raw_local_irq* functions ARC irqsave/restore macros were missing the compiler barrier, causing a stale load in irq-enabled region be used in irq-safe region, despite being changed, because the register holding the value was still live. The problem manifested as random crashes in timer code when stress testing ARCLinux (3.9-rc3) on a !SMP && !PREEMPT_COUNT Here's the exact sequence which caused this: (0). tv1[x] <----> t1 <---> t2 (1). mod_timer(t1) interrupted after it calls timer_pending() (2). mod_timer(t2) completes (3). mod_timer(t1) resumes but messes up the list (4). __runt_timers( ) uses bogus timer_list entry / crashes in timer->function Essentially mod_timer() was racing against itself and while the spinlock serialized the tv1[] timer link list, timer_pending() called outside the spinlock, cached timer link list element in a register. With low register pressure (and a deep register file), lack of barrier in raw_local_irqsave() as well as preempt_disable (!PREEMPT_COUNT version), there was nothing to force gcc to reload across the spinlock, causing a stale value in reg be used for link list manipulation - ensuing a corruption. ARcompact disassembly which shows the culprit generated code: mod_timer: push_s blink mov_s r13,r0 # timer, timer .. ###### timer_pending( ) ld_s r3,[r13] # <------ .entry.next LOADED brne r3, 0, @.L163 .L163: .. 
###### spin_lock_irq( ) lr r5, [status32] # flags bic r4, r5, 6 # temp, flags, and.f 0, r5, 6 # flags, flag.nz r4 ###### detach_if_pending( ) begins tst_s r3,r3 <-------------- # timer_pending( ) checks timer->entry.next # r3 is NOT reloaded by gcc, using stale value beq.d @.L169 mov.eq r0,0 ##### detach_timer( ): __list_del( ) ld r4,[r13,4] # .entry.prev, D.31439 st r4,[r3,4] # .prev, D.31439 st r3,[r4] # .next, D.30246 We initially tried to fix this by adding barrier() to preempt_* macros for !PREEMPT_COUNT but Linus clarified that it was anything but wrong. http://www.spinics.net/lists/kernel/msg1512709.html [vgupta: updated commitlog] Reported-by/Signed-off-by: Christian Ruppert Cc: Christian Ruppert Cc: Pierrick Hascoet Debugged-by/Signed-off-by: Vineet Gupta Signed-off-by: Linus Torvalds --- arch/arc/include/asm/irqflags.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index ccd84806b62f..eac071668201 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h @@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void) " flag.nz %0 \n" : "=r"(temp), "=r"(flags) : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) - : "cc"); + : "memory", "cc"); return flags; } @@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags) __asm__ __volatile__( " flag %0 \n" : - : "r"(flags)); + : "r"(flags) + : "memory"); } /* @@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void) " and %0, %0, %1 \n" " flag %0 \n" : "=&r"(temp) - : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))); + : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)) + : "memory"); } /* @@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void) __asm__ __volatile__( " lr %0, [status32] \n" - : "=&r"(temp)); + : "=&r"(temp) + : + : "memory"); return temp; } -- cgit v1.2.3 From b2dfaa8d33cee9dd4ed78979f5d70063df546101 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Mon, 8 Apr 2013 14:21:31 +1000 Subject: m68k: define a local gpio_request_one() function Compiling for linux-3.9-rc1 and later fails with: drivers/gpio/devres.c: In function 'devm_gpio_request_one': drivers/gpio/devres.c:90:2: error: implicit declaration of function 'gpio_request_one' [-Werror=implicit-function-declaration] So provide a local gpio_request_one() function. Code largely borrowed from blackfin's local gpio_request_one() function. Signed-off-by: Greg Ungerer Acked-by: Linus Walleij --- arch/m68k/include/asm/gpio.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'arch') diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h index 4395ffc51fdb..8cc83431805b 100644 --- a/arch/m68k/include/asm/gpio.h +++ b/arch/m68k/include/asm/gpio.h @@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio) return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); } +static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) +{ + int err; + + err = gpio_request(gpio, label); + if (err) + return err; + + if (flags & GPIOF_DIR_IN) + err = gpio_direction_input(gpio); + else + err = gpio_direction_output(gpio, + (flags & GPIOF_INIT_HIGH) ? 
1 : 0); + + if (err) + gpio_free(gpio); + + return err; +} + #endif -- cgit v1.2.3 From a6e4d5a03e9e3587e88aba687d8f225f4f04c792 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Mon, 25 Mar 2013 09:14:30 +0000 Subject: x86, efivars: firmware bug workarounds should be in platform code Let's not burden ia64 with checks in the common efivars code that we're not writing too much data to the variable store. That kind of thing is an x86 firmware bug, plain and simple. efi_query_variable_store() provides platforms with a wrapper in which they can perform checks and workarounds for EFI variable storage bugs. Cc: H. Peter Anvin Cc: Matthew Garrett Signed-off-by: Matt Fleming --- arch/x86/platform/efi/efi.c | 25 +++++++++++++++++++++++++ drivers/firmware/efivars.c | 18 +++--------------- include/linux/efi.h | 9 ++++++++- 3 files changed, 36 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 5f2ecaf3f9d8..c89c245eff40 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -999,3 +999,28 @@ u64 efi_mem_attributes(unsigned long phys_addr) } return 0; } + +/* + * Some firmware has serious problems when using more than 50% of the EFI + * variable store, i.e. it triggers bugs that can brick machines. Ensure that + * we never use more than this safe limit. + * + * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable + * store. + */ +efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) +{ + efi_status_t status; + u64 storage_size, remaining_size, max_size; + + status = efi.query_variable_info(attributes, &storage_size, + &remaining_size, &max_size); + if (status != EFI_SUCCESS) + return status; + + if (!storage_size || size > remaining_size || size > max_size || + (remaining_size - size) < (storage_size / 2)) + return EFI_OUT_OF_RESOURCES; + + return EFI_SUCCESS; +} diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 7acafb80fd4c..bf15d81d74e1 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -436,24 +436,12 @@ static efi_status_t check_var_size_locked(struct efivars *efivars, u32 attributes, unsigned long size) { - u64 storage_size, remaining_size, max_size; - efi_status_t status; const struct efivar_operations *fops = efivars->ops; - if (!efivars->ops->query_variable_info) + if (!efivars->ops->query_variable_store) return EFI_UNSUPPORTED; - status = fops->query_variable_info(attributes, &storage_size, - &remaining_size, &max_size); - - if (status != EFI_SUCCESS) - return status; - - if (!storage_size || size > remaining_size || size > max_size || - (remaining_size - size) < (storage_size / 2)) - return EFI_OUT_OF_RESOURCES; - - return status; + return fops->query_variable_store(attributes, size); } @@ -2131,7 +2119,7 @@ efivars_init(void) ops.get_variable = efi.get_variable; ops.set_variable = efi.set_variable; ops.get_next_variable = efi.get_next_variable; - ops.query_variable_info = efi.query_variable_info; + ops.query_variable_store = efi_query_variable_store; error = register_efivars(&__efivars, &ops, efi_kobj); if (error) diff --git a/include/linux/efi.h b/include/linux/efi.h index 9bf2f1fcae27..3d7df3d32c66 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -333,6 +333,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules, unsigned long count, u64 *max_size, int *reset_type); +typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size); /* * EFI 
Configuration Table and GUID definitions @@ -575,9 +576,15 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos #ifdef CONFIG_X86 extern void efi_late_init(void); extern void efi_free_boot_services(void); +extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size); #else static inline void efi_late_init(void) {} static inline void efi_free_boot_services(void) {} + +static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) +{ + return EFI_SUCCESS; +} #endif extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); extern u64 efi_get_iobase (void); @@ -731,7 +738,7 @@ struct efivar_operations { efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; - efi_query_variable_info_t *query_variable_info; + efi_query_variable_store_t *query_variable_store; }; struct efivars { -- cgit v1.2.3 From cab1e0a36c9dd0b0671fb84197ed294513f5adc1 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 29 Mar 2013 16:20:09 +0100 Subject: ARM: clk-imx35: Bugfix iomux clock This patch enables iomuxc_gate clock. It is necessary to be able to reconfigure iomux pads. Without this clock enabled, the clk_disable_unused function will disable this clock and the iomux pads are not configurable anymore. This happens at every boot. After a reboot (watchdog system reset) the clock is not enabled again, so all iomux pad reconfigurations in boot code are without effect. The iomux pads should be always configurable, so this patch always enables it. Signed-off-by: Markus Pargmann Cc: stable@vger.kernel.org Signed-off-by: Sascha Hauer Signed-off-by: Shawn Guo --- arch/arm/mach-imx/clk-imx35.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index e13a8fa5e62c..b95898a3912c 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c @@ -265,6 +265,7 @@ int __init mx35_clocks_init(void) clk_prepare_enable(clk[iim_gate]); clk_prepare_enable(clk[emi_gate]); clk_prepare_enable(clk[max_gate]); + clk_prepare_enable(clk[iomuxc_gate]); /* * SCC is needed to boot via mmc after a watchdog reset. The clock code -- cgit v1.2.3 From 75498083e25e96932ad998ffdeadb17234c68d3a Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 29 Mar 2013 16:20:10 +0100 Subject: ARM: imx35 Bugfix admux clock The admux clock seems to be the audmux clock as tests show. audmux does not work without this clock enabled. Currently imx35 does not register a clock device for audmux. This patch adds this registration. imx-audmux driver already handles a clock device, so no changes are necessary there. 
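For reference, the consumer side of such a clkdev mapping looks roughly like the sketch below. The probe function and variable names are invented for illustration (this is not the actual imx-audmux code); the point is that clk_register_clkdev(clk, "audmux", NULL) lets a device look the clock up by the connection id "audmux".

/* Illustrative consumer sketch (invented names), pairing with the
 * clkdev entry added by this patch. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_audmux_probe(struct platform_device *pdev)
{
        struct clk *clk;

        clk = devm_clk_get(&pdev->dev, "audmux");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        return clk_prepare_enable(clk);
}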
Signed-off-by: Markus Pargmann Cc: stable@vger.kernel.org Signed-off-by: Sascha Hauer Signed-off-by: Shawn Guo --- arch/arm/mach-imx/clk-imx35.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index b95898a3912c..2193c834f55c 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c @@ -257,6 +257,7 @@ int __init mx35_clocks_init(void) clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); + clk_register_clkdev(clk[admux_gate], "audmux", NULL); clk_prepare_enable(clk[spba_gate]); clk_prepare_enable(clk[gpio1_gate]); -- cgit v1.2.3 From 2bb4b70b1dbb45f0c1a3ba98066e6635d8aa3fe0 Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Wed, 3 Apr 2013 23:50:09 +0800 Subject: ARM: imx: provide twd clock lookup from device tree While booting from device tree, imx6q used to provide twd clock lookup by calling clk_register_clkdev() in clock driver. However, the commit bd60345 (ARM: use device tree to get smp_twd clock) forces DT boot to look up the clock from device tree. It causes the failure below when twd driver tries to get the clock, and hence kernel has to calibrate the local timer frequency. smp_twd: clock not found -2 ... Calibrating local timer... 396.13MHz. Fix the regression by providing twd clock lookup from device tree, and remove the unused twd clk_register_clkdev() call from clock driver. Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx6qdl.dtsi | 1 + arch/arm/mach-imx/clk-imx6q.c | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 06ec460b4581..281a223591ff 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -91,6 +91,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x00a00600 0x20>; interrupts = <1 13 0xf01>; + clocks = <&clks 15>; }; L2: l2-cache@00a02000 { diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 2f9ff93a4e61..22a3021a455b 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void) clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); - clk_register_clkdev(clk[twd], NULL, "smp_twd"); clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); clk_register_clkdev(clk[ahb], "ahb", NULL); clk_register_clkdev(clk[cko1], "cko1", NULL); -- cgit v1.2.3 From e8094b2c17126c7dfdeafa296f206a4a3b122d23 Mon Sep 17 00:00:00 2001 From: Dirk Behme Date: Thu, 4 Apr 2013 16:03:29 +0200 Subject: ARM i.MX6: Fix ldb_di clock selection According to the recent i.MX6 Quad technical reference manual, mode 0x4 (100b) of the CCM_CS2DCR register (address 0x020C402C) bits [11-9] and [14-12] select the PLL3 clock, and not the PLL3 PFD1 540M clock. In our code, the PLL3 root clock is named 'pll3_usb_otg', select this instead of the 540M clock. 
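To make the register-value-to-parent mapping explicit: a mux's parent table is indexed by the raw value of the selection field, so the mode 100b (0x4) described in the TRM picks entry 4 of the table. A sketch of the corrected mapping for the 3-bit ldb_di*_sel fields (illustrative declaration only; the authoritative change is the ldb_di_sels[] hunk below):

/* Sketch: parent names indexed by the 3-bit ldb_di*_sel field value.
 * Selection 100b (0x4) picks entry [4], which must therefore name the
 * PLL3 root clock "pll3_usb_otg" rather than "pll3_pfd1_540m". */
static const char *ldb_di_sels_sketch[] = {
        [0x0] = "pll5_video",
        [0x1] = "pll2_pfd0_352m",
        [0x2] = "pll2_pfd2_396m",
        [0x3] = "mmdc_ch1_axi",
        [0x4] = "pll3_usb_otg",
};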
Signed-off-by: Dirk Behme Signed-off-by: Shawn Guo --- arch/arm/mach-imx/clk-imx6q.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 22a3021a455b..d38e54f5b6d7 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m" static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; -static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; +static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; -- cgit v1.2.3 From b530f742ac27460d41d35b638ad6aad92044a982 Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: Mon, 8 Apr 2013 21:39:45 +0900 Subject: ARM: S3C24XX: Correct NR_IRQS definition for s3c2440 Due to NR_IRQS being incorrectly defined not all IRQ domains can be registered for S3C2440. It causes following errors on a s3c2440 SoC based board: NR_IRQS:89 S3C2440: IRQ Support irq: clearing pending status 00000002 ------------[ cut here ]------------ WARNING: at kernel/irq/irqdomain.c:234 0xc0056ed0() ... irq: could not create irq-domain ... s3c2410-wdt s3c2410-wdt: failed to install irq (-22) s3c2410-wdt: probe of s3c2410-wdt failed with error -22 ... samsung-uart s3c2440-uart.0: cannot get irq 74 Fix this by increasing NR_IRQS to at least (IRQ_S3C2443_AC97 + 1) if CPU_S3C2440 is selected, so the subintc IRQ domain gets properly registered. Signed-off-by: Tomasz Figa Signed-off-by: Sylwester Nawrocki Acked-by: Heiko Stuebner Signed-off-by: Kukjin Kim --- arch/arm/mach-s3c24xx/include/mach/irqs.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h index b7a9f4d469e8..1e73f5fa8659 100644 --- a/arch/arm/mach-s3c24xx/include/mach/irqs.h +++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h @@ -188,10 +188,8 @@ #if defined(CONFIG_CPU_S3C2416) #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) -#elif defined(CONFIG_CPU_S3C2443) -#define NR_IRQS (IRQ_S3C2443_AC97+1) #else -#define NR_IRQS (IRQ_S3C2440_AC97+1) +#define NR_IRQS (IRQ_S3C2443_AC97 + 1) #endif /* compatibility define. */ -- cgit v1.2.3 From 646dd2f0a980949b05042792fbadd72b735c3eda Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: Tue, 9 Apr 2013 23:52:21 +0900 Subject: ARM: S3C24XX: Fix interrupt pending register offset of the EINT controller The external pending interrupt register address (EINTPEND) offset is 0xa8, not 0x08. Without this patch the external interrupts are not properly acknowledged, which may lead to an interrupt storm and the system hang as soon as any external interrupt is requested. 
Signed-off-by: Sylwester Nawrocki Reviewed-by: Heiko Stuebner Signed-off-by: Kukjin Kim --- arch/arm/mach-s3c24xx/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c index cb9f5e011e73..d8ba9bee4c7e 100644 --- a/arch/arm/mach-s3c24xx/irq.c +++ b/arch/arm/mach-s3c24xx/irq.c @@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np, base = (void *)0xfd000000; intc->reg_mask = base + 0xa4; - intc->reg_pending = base + 0x08; + intc->reg_pending = base + 0xa8; irq_num = 20; irq_start = S3C2410_IRQ(32); irq_offset = 4; -- cgit v1.2.3 From 3e2e0d2c222bdf5bafd722dec1618fa6073ef372 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Tue, 9 Apr 2013 12:33:07 -0400 Subject: tile: comment assumption about __insn_mtspr for The arch_local_irq_save(), etc., routines are required to function as compiler barriers. They do, but it's subtle and requires knowing that the gcc builtin __insn_mtspr() is marked as a memory clobber. Provide a comment explaining the assumption. Signed-off-by: Chris Metcalf [ This came about from me wondering about the synchronization rules of __insn_mtspr() - Linus ] Signed-off-by: Linus Torvalds --- arch/tile/include/asm/irqflags.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 241c0bb60b12..c96f9bbb760d 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h @@ -40,7 +40,15 @@ #include #include -/* Set and clear kernel interrupt masks. */ +/* + * Set and clear kernel interrupt masks. + * + * NOTE: __insn_mtspr() is a compiler builtin marked as a memory + * clobber. We rely on it being equivalent to a compiler barrier in + * this code since arch_local_irq_save() and friends must act as + * compiler barriers. This compiler semantic is baked into enough + * places that the compiler will maintain it going forward. + */ #if CHIP_HAS_SPLIT_INTR_MASK() #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 # error Fix assumptions about which word various interrupts are in -- cgit v1.2.3 From ccf932042fa7785832d8989ba1369cd7c7f5d7a1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 31 Mar 2013 22:34:37 -0400 Subject: palinfo fixes * check for proc_mkdir() failures * fix buffer overrun - sizeof(format string) is *not* enough to hold sprintf() result. * use proc_remove_subtree(); life's much easier with it Signed-off-by: Al Viro --- arch/ia64/kernel/palinfo.c | 77 ++++++++-------------------------------------- 1 file changed, 13 insertions(+), 64 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 77597e5ea60a..79521d5499f9 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={ #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) -/* - * this array is used to keep track of the proc entries we create. This is - * required in the module mode when we need to remove all entries. 
The procfs code - * does not do recursion of deletion - * - * Notes: - * - +1 accounts for the cpuN directory entry in /proc/pal - */ -#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1)) - -static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES]; static struct proc_dir_entry *palinfo_dir; /* @@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi static void __cpuinit create_palinfo_proc_entries(unsigned int cpu) { -# define CPUSTR "cpu%d" - pal_func_cpu_u_t f; - struct proc_dir_entry **pdir; struct proc_dir_entry *cpu_dir; int j; - char cpustr[sizeof(CPUSTR)]; - - - /* - * we keep track of created entries in a depth-first order for - * cleanup purposes. Each entry is stored into palinfo_proc_entries - */ - sprintf(cpustr,CPUSTR, cpu); + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", cpu); cpu_dir = proc_mkdir(cpustr, palinfo_dir); + if (!cpu_dir) + return; f.req_cpu = cpu; - /* - * Compute the location to store per cpu entries - * We dont store the top level entry in this list, but - * remove it finally after removing all cpu entries. - */ - pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; - *pdir++ = cpu_dir; for (j=0; j < NR_PALINFO_ENTRIES; j++) { f.func_id = j; - *pdir = create_proc_read_entry( - palinfo_entries[j].name, 0, cpu_dir, - palinfo_read_entry, (void *)f.value); - pdir++; + create_proc_read_entry( + palinfo_entries[j].name, 0, cpu_dir, + palinfo_read_entry, (void *)f.value); } } static void remove_palinfo_proc_entries(unsigned int hcpu) { - int j; - struct proc_dir_entry *cpu_dir, **pdir; - - pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; - cpu_dir = *pdir; - *pdir++=NULL; - for (j=0; j < (NR_PALINFO_ENTRIES); j++) { - if ((*pdir)) { - remove_proc_entry ((*pdir)->name, cpu_dir); - *pdir ++= NULL; - } - } - - if (cpu_dir) { - remove_proc_entry(cpu_dir->name, palinfo_dir); - } + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", hcpu); + remove_proc_subtree(cpustr, palinfo_dir); } static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, @@ -1058,6 +1019,8 @@ palinfo_init(void) printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); palinfo_dir = proc_mkdir("pal", NULL); + if (!palinfo_dir) + return -ENOMEM; /* Create palinfo dirs in /proc for all online cpus */ for_each_online_cpu(i) { @@ -1073,22 +1036,8 @@ palinfo_init(void) static void __exit palinfo_exit(void) { - int i = 0; - - /* remove all nodes: depth first pass. Could optimize this */ - for_each_online_cpu(i) { - remove_palinfo_proc_entries(i); - } - - /* - * Remove the top level entry finally - */ - remove_proc_entry(palinfo_dir->name, NULL); - - /* - * Unregister from cpu notifier callbacks - */ unregister_hotcpu_notifier(&palinfo_cpu_notifier); + remove_proc_subtree("pal", NULL); } module_init(palinfo_init); -- cgit v1.2.3 From f934af05cb1bf20558542185299394a69060b829 Mon Sep 17 00:00:00 2001 From: Mark Salter Date: Tue, 9 Apr 2013 15:35:46 -0400 Subject: add memory barrier to arch_local_irq_restore arch_local_irq_save() and friends are required to act as compiler memory barriers. This patch adds a "memory" clobber to the inline asm code in arch_local_irq_restore() which is used as the building block for other functions needing to set/clear the interrupt enable in the CSR register. 
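To illustrate what the clobber buys, here is a stand-alone user-space sketch (invented names, not part of the patch) of the stale-load problem also described for the ARC irqflags change earlier in this log: without a "memory" clobber the compiler is free to keep a previously loaded value cached across the asm.

/* Stand-alone sketch, invented names. If irq_restore_no_barrier() were
 * used below, gcc could assume 'pending' is unchanged across the call
 * and fold the return value to 0; the "memory" clobber forbids that. */
static int pending;     /* stands in for data an interrupt handler writes */

static inline void irq_restore_no_barrier(unsigned long flags)
{
        __asm__ __volatile__("" : : "r"(flags));                /* not a compiler barrier */
}

static inline void irq_restore_barrier(unsigned long flags)
{
        __asm__ __volatile__("" : : "r"(flags) : "memory");     /* compiler barrier */
}

int changed_while_masked(unsigned long flags)
{
        int before = pending;           /* loaded while "interrupts are off" */

        irq_restore_barrier(flags);     /* forces 'pending' to be re-read below */

        return pending != before;
}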
Signed-off-by: Mark Salter --- arch/c6x/include/asm/irqflags.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h index cf78e09e18c3..2c71d5634ec2 100644 --- a/arch/c6x/include/asm/irqflags.h +++ b/arch/c6x/include/asm/irqflags.h @@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void) /* set interrupt enabled status */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); + asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory"); } /* unconditionally enable interrupts */ -- cgit v1.2.3 From f110c0c1926028a233830c6166e4d40314420823 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 9 Apr 2013 16:18:55 +1000 Subject: powerpc: fix compiling CONFIG_PPC_TRANSACTIONAL_MEM when CONFIG_ALTIVEC=n We can't compile a kernel with CONFIG_ALTIVEC=n when CONFIG_PPC_TRANSACTIONAL_MEM=y. We currently get: arch/powerpc/kernel/tm.S:320: Error: unsupported relocation against THREAD_VSCR arch/powerpc/kernel/tm.S:323: Error: unsupported relocation against THREAD_VR0 arch/powerpc/kernel/tm.S:323: Error: unsupported relocation against THREAD_VR0 etc. The below fixes this with a sprinkling of #ifdefs. This was found by mpe with kisskb: http://kisskb.ellerman.id.au/kisskb/buildresult/8539442/ Signed-off-by: Michael Neuling Signed-off-by: Stephen Rothwell --- arch/powerpc/kernel/process.c | 2 ++ arch/powerpc/kernel/signal_32.c | 2 ++ arch/powerpc/kernel/signal_64.c | 2 ++ arch/powerpc/kernel/tm.S | 2 ++ 4 files changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 59dd545fdde1..16e77a81ab4f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new) new->thread.regs->msr |= (MSR_FP | new->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&new->thread); new->thread.regs->msr |= MSR_VEC; } +#endif /* We may as well turn on VSX too since all the state is restored now */ if (msr & MSR_VSX) new->thread.regs->msr |= MSR_VSX; diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 3acb28e245b4..95068bf569ad 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs, do_load_up_transact_fpu(¤t->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(¤t->thread); regs->msr |= MSR_VEC; } +#endif return 0; } diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 995f8543cb57..c1794286098c 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, do_load_up_transact_fpu(¤t->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(¤t->thread); regs->msr |= MSR_VEC; } +#endif return err; } diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 84dbace657ce..2da67e7a16d5 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint) or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */ mtmsr r5 +#ifdef CONFIG_ALTIVEC /* FP and VEC registers: These are 
recheckpointed from thread.fpr[] * and thread.vr[] respectively. The thread.transact_fpr[] version * is more modern, and will be loaded subsequently by any FPUnavailable @@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint) REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ ld r5, THREAD_VRSAVE(r3) mtspr SPRN_VRSAVE, r5 +#endif dont_restore_vec: andi. r0, r4, MSR_FP -- cgit v1.2.3 From aaaf165b247a1a8ea5cd2936d9fd1eefe5e580f9 Mon Sep 17 00:00:00 2001 From: Nigel Roberts Date: Sun, 31 Mar 2013 15:13:24 +1100 Subject: Fix GE0/GE1 init on ix2-200 as GE0 has no PHY Signed-off-by: Jason Cooper --- arch/arm/mach-kirkwood/board-iomega_ix2_200.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c index f655b2637b0e..e5f70415905a 100644 --- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c +++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c @@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = { .duplex = DUPLEX_FULL, }; +static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(11), +}; + void __init iomega_ix2_200_init(void) { /* * Basic setup. Needs to be called early. */ - kirkwood_ge01_init(&iomega_ix2_200_ge00_data); + kirkwood_ge00_init(&iomega_ix2_200_ge00_data); + kirkwood_ge01_init(&iomega_ix2_200_ge01_data); } -- cgit v1.2.3 From 600468d0686096ddc1000c8a2e69475931084414 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Fri, 5 Apr 2013 14:32:52 +0200 Subject: arm: mvebu: Fix the irq map function in SMP mode This patch fixes the regression introduced by commit 3202bf0157ccb ("arm: mvebu: Improve the SMP support of the interrupt controller"): GPIO IRQs were no longer delivered to the CPUs. To be delivered to a CPU, an interrupt must be enabled both at the CPU level and at the interrupt source level. Before the offending patch, all the interrupts were enabled at the source level in the map() function, and mask() and unmask() were done by handling the per-CPU part. This was fine when running in UP with only one CPU. The offending patch added support for SMP; in this case mask() and unmask() were done by handling the interrupt source level part, while the per-CPU part was handled by the affinity API to select the CPU which will receive the interrupt. (Due to a hardware limitation, only one CPU at a time can receive a given interrupt.) For "normal" interrupts, __setup_irq() was called when an irq was registered; irq_set_affinity() is called from this function, which enabled the interrupt on one of the CPUs. For GPIO IRQs, however, which were chained interrupts, irq_set_affinity() was never called, so none of the CPUs was selected to receive the interrupt. With this patch, all the interrupts are enabled on the current CPU in the map() function. Enabling an interrupt on a CPU no longer depends on irq_set_affinity(), so chained irqs are no longer a special case. However, the CPU which will receive the irq can still be changed later using irq_set_affinity(). Tested with Mirabox (A370) and Openblocks AX3 (AXP), rootfs mounted over NFS, compiled with CONFIG_SMP=y/N.
Signed-off-by: Gregory CLEMENT Reported-by: Ryan Press Investigated-by: Ezequiel Garcia Tested-by: Ezequiel Garcia Tested-by: Ryan Press Signed-off-by: Jason Cooper --- arch/arm/mach-mvebu/irq-armada-370-xp.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c index 6a9195e10579..d5970f5a1e8d 100644 --- a/arch/arm/mach-mvebu/irq-armada-370-xp.c +++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c @@ -61,7 +61,6 @@ static struct irq_domain *armada_370_xp_mpic_domain; */ static void armada_370_xp_irq_mask(struct irq_data *d) { -#ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) @@ -70,15 +69,10 @@ static void armada_370_xp_irq_mask(struct irq_data *d) else writel(hwirq, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#endif } static void armada_370_xp_irq_unmask(struct irq_data *d) { -#ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) @@ -87,10 +81,6 @@ static void armada_370_xp_irq_unmask(struct irq_data *d) else writel(hwirq, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#endif } #ifdef CONFIG_SMP @@ -146,7 +136,11 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { armada_370_xp_irq_mask(irq_get_irq_data(virq)); - writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); + if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + writel(hw, per_cpu_int_base + + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + else + writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); irq_set_status_flags(virq, IRQ_LEVEL); if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { -- cgit v1.2.3 From 1160c2779b826c6f5c08e5cc542de58fd1f667d5 Mon Sep 17 00:00:00 2001 From: Samu Kallio Date: Sat, 23 Mar 2013 09:36:35 -0400 Subject: x86, mm, paravirt: Fix vmalloc_fault oops during lazy MMU updates In paravirtualized x86_64 kernels, vmalloc_fault may cause an oops when lazy MMU updates are enabled, because set_pgd effects are being deferred. One instance of this problem is during process mm cleanup with memory cgroups enabled. The chain of events is as follows: - zap_pte_range enables lazy MMU updates - zap_pte_range eventually calls mem_cgroup_charge_statistics, which accesses the vmalloc'd mem_cgroup per-cpu stat area - vmalloc_fault is triggered which tries to sync the corresponding PGD entry with set_pgd, but the update is deferred - vmalloc_fault oopses due to a mismatch in the PUD entries The OOPs usually looks as so: ------------[ cut here ]------------ kernel BUG at arch/x86/mm/fault.c:396! invalid opcode: 0000 [#1] SMP .. snip .. CPU 1 Pid: 10866, comm: httpd Not tainted 3.6.10-4.fc18.x86_64 #1 RIP: e030:[] [] vmalloc_fault+0x11f/0x208 .. snip .. Call Trace: [] do_page_fault+0x399/0x4b0 [] ? xen_mc_extend_args+0xec/0x110 [] page_fault+0x25/0x30 [] ? mem_cgroup_charge_statistics.isra.13+0x13/0x50 [] __mem_cgroup_uncharge_common+0xd8/0x350 [] mem_cgroup_uncharge_page+0x57/0x60 [] page_remove_rmap+0xe0/0x150 [] ? vm_normal_page+0x1a/0x80 [] unmap_single_vma+0x531/0x870 [] unmap_vmas+0x52/0xa0 [] ? pte_mfn_to_pfn+0x72/0x100 [] exit_mmap+0x98/0x170 [] ? 
__raw_callee_save_xen_pmd_val+0x11/0x1e [] mmput+0x83/0xf0 [] exit_mm+0x104/0x130 [] do_exit+0x15a/0x8c0 [] do_group_exit+0x3f/0xa0 [] sys_exit_group+0x17/0x20 [] system_call_fastpath+0x16/0x1b Calling arch_flush_lazy_mmu_mode immediately after set_pgd makes the changes visible to the consistency checks. Cc: RedHat-Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=914737 Tested-by: Josh Boyer Reported-and-Tested-by: Krishna Raman Signed-off-by: Samu Kallio Link: http://lkml.kernel.org/r/1364045796-10720-1-git-send-email-konrad.wilk@oracle.com Tested-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: H. Peter Anvin --- arch/x86/mm/fault.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2b97525246d4..0e883364abb5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) if (pgd_none(*pgd_ref)) return -1; - if (pgd_none(*pgd)) + if (pgd_none(*pgd)) { set_pgd(pgd, *pgd_ref); - else + arch_flush_lazy_mmu_mode(); + } else { BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + } /* * Below here mismatches are bugs because these lower tables -- cgit v1.2.3 From 511ba86e1d386f671084b5d0e6f110bb30b8eeb2 Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Sat, 23 Mar 2013 09:36:36 -0400 Subject: x86, mm: Patch out arch_flush_lazy_mmu_mode() when running on bare metal Invoking arch_flush_lazy_mmu_mode() results in calls to preempt_enable()/disable() which may have performance impact. Since lazy MMU is not used on bare metal we can patch away arch_flush_lazy_mmu_mode() so that it is never called in such environment. [ hpa: the previous patch "Fix vmalloc_fault oops during lazy MMU updates" may cause a minor performance regression on bare metal. This patch resolves that performance regression. It is somewhat unclear to me if this is a good -stable candidate. ] Signed-off-by: Boris Ostrovsky Link: http://lkml.kernel.org/r/1364045796-10720-2-git-send-email-konrad.wilk@oracle.com Tested-by: Josh Boyer Tested-by: Konrad Rzeszutek Wilk Acked-by: Borislav Petkov Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: H. Peter Anvin Cc: SEE NOTE ABOVE --- arch/x86/include/asm/paravirt.h | 5 ++++- arch/x86/include/asm/paravirt_types.h | 2 ++ arch/x86/kernel/paravirt.c | 25 +++++++++++++------------ arch/x86/lguest/boot.c | 1 + arch/x86/xen/mmu.c | 1 + 5 files changed, 21 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5edd1742cfd0..7361e47db79f 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void) PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); } -void arch_flush_lazy_mmu_mode(void); +static inline void arch_flush_lazy_mmu_mode(void) +{ + PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); +} static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 142236ed83af..b3b0ec1dac86 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -91,6 +91,7 @@ struct pv_lazy_ops { /* Set deferred update mode, used for batching operations. 
*/ void (*enter)(void); void (*leave)(void); + void (*flush)(void); }; struct pv_time_ops { @@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next); void paravirt_enter_lazy_mmu(void); void paravirt_leave_lazy_mmu(void); +void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); u32 _paravirt_ident_32(u32); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 17fff18a1031..8bfb335f74bb 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void) leave_lazy(PARAVIRT_LAZY_MMU); } +void paravirt_flush_lazy_mmu(void) +{ + preempt_disable(); + + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { + arch_leave_lazy_mmu_mode(); + arch_enter_lazy_mmu_mode(); + } + + preempt_enable(); +} + void paravirt_start_context_switch(struct task_struct *prev) { BUG_ON(preemptible()); @@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return this_cpu_read(paravirt_lazy_mode); } -void arch_flush_lazy_mmu_mode(void) -{ - preempt_disable(); - - if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { - arch_leave_lazy_mmu_mode(); - arch_enter_lazy_mmu_mode(); - } - - preempt_enable(); -} - struct pv_info pv_info = { .name = "bare hardware", .paravirt_enabled = 0, @@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = { .lazy_mode = { .enter = paravirt_nop, .leave = paravirt_nop, + .flush = paravirt_nop, }, .set_fixmap = native_set_fixmap, diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 1cbd89ca5569..7114c63f047d 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1334,6 +1334,7 @@ __init void lguest_init(void) pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; + pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; pv_mmu_ops.pte_update = lguest_pte_update; pv_mmu_ops.pte_update_defer = lguest_pte_update; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 6afbb2ca9a0a..2f5d6875555e 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -2196,6 +2196,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .lazy_mode = { .enter = paravirt_enter_lazy_mmu, .leave = xen_leave_lazy_mmu, + .flush = paravirt_flush_lazy_mmu, }, .set_fixmap = xen_set_fixmap, -- cgit v1.2.3 From f4d7a536faa43f30d87f701268db76b8433a2a8b Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Wed, 10 Apr 2013 19:41:38 +0000 Subject: ARM: OMAP4: hwmod data: make 'ocp2scp_usb_phy_phy_48m" as the main clock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 92702df3570e ("ARM: OMAP4: PM: fix PM regression introduced by recent clock cleanup") makes the 'ocp2scp_usb_phy_phy_48m' as optional functional clock causing regression in MUSB. But this 48MHz clock is a mandatory clock for usb phy attached to ocp2scp and hence made as the main clock for ocp2scp. 
Cc: Keerthy Cc: Benoît Cousson Cc: Paul Walmsley Signed-off-by: Kishon Vijay Abraham I [paul@pwsan.com: add comment to the hwmod data to try to prevent any future mistakes here] Signed-off-by: Paul Walmsley Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 9e0576569e07..eaba9dc91a0d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2714,16 +2714,22 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = { { } }; -static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = { - { .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" }, -}; - /* ocp2scp_usb_phy */ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { .name = "ocp2scp_usb_phy", .class = &omap44xx_ocp2scp_hwmod_class, .clkdm_name = "l3_init_clkdm", - .main_clk = "func_48m_fclk", + /* + * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP + * block as an "optional clock," and normally should never be + * specified as the main_clk for an OMAP IP block. However it + * turns out that this clock is actually the main clock for + * the ocp2scp_usb_phy IP block: + * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html + * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems + * to be the best workaround. + */ + .main_clk = "ocp2scp_usb_phy_phy_48m", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET, @@ -2732,8 +2738,6 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { }, }, .dev_attr = ocp2scp_dev_attr, - .opt_clks = ocp2scp_usb_phy_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(ocp2scp_usb_phy_opt_clks), }; /* -- cgit v1.2.3 From c5e6cb051c5f7d56f05bd6a4af22cb300a4ced79 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Mon, 18 Feb 2013 18:13:09 +0000 Subject: kvm/powerpc/e500mc: fix tlb invalidation on cpu migration The existing check handles the case where we've migrated to a different core than we last ran on, but it doesn't handle the case where we're still on the same cpu we last ran on, but some other vcpu has run on this cpu in the meantime. Without this, guest segfaults (and other misbehavior) have been seen in smp guests. 
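The rule being implemented can be sketched as follows (invented names; the patch itself uses a per-cpu last_vcpu_on_cpu pointer, as the hunk below shows): the guest TLB must be flushed not only when this vcpu last ran on a different physical cpu, but also whenever any other vcpu has used this cpu since.

/* Abstract sketch of the invalidation rule, invented names. */
struct vcpu;

extern void flush_guest_tlb(struct vcpu *v);    /* stand-in for kvmppc_e500_tlbil_all() */

static struct vcpu *last_user_of_cpu[64];       /* stand-in for the per-cpu variable */

static void example_vcpu_load(struct vcpu *v, int this_cpu, int last_cpu)
{
        if (this_cpu != last_cpu || last_user_of_cpu[this_cpu] != v) {
                flush_guest_tlb(v);             /* stale translations may belong to another vcpu */
                last_user_of_cpu[this_cpu] = v;
        }
}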
Cc: stable@vger.kernel.org # 3.8.x Signed-off-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500mc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 1f89d26e65fb..2f4baa074b2e 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) { } +static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); + void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); mtspr(SPRN_GESR, vcpu->arch.shared->esr); - if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) + if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || + __get_cpu_var(last_vcpu_on_cpu) != vcpu) { kvmppc_e500_tlbil_all(vcpu_e500); + __get_cpu_var(last_vcpu_on_cpu) = vcpu; + } kvmppc_load_guest_fp(vcpu); } -- cgit v1.2.3 From f76cfa3c2496c462b5bc01bd0c9340c2715b73ca Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Wed, 10 Apr 2013 15:28:25 +0200 Subject: x86/mm/cpa: Convert noop to functional fix Commit: a8aed3e0752b ("x86/mm/pageattr: Prevent PSE and GLOABL leftovers to confuse pmd/pte_present and pmd_huge") introduced a valid fix but one location that didn't trigger the bug that lead to finding those (small) problems, wasn't updated using the right variable. The wrong variable was also initialized for no good reason, that may have been the source of the confusion. Remove the noop initialization accordingly. Commit a8aed3e0752b also erroneously removed one canon_pgprot pass meant to clear pmd bitflags not supported in hardware by older CPUs, that automatically gets corrected by this patch too by applying it to the right variable in the new location. Reported-by: Stefan Bader Signed-off-by: Andrea Arcangeli Acked-by: Borislav Petkov Cc: Andy Whitcroft Cc: Mel Gorman Link: http://lkml.kernel.org/r/1365600505-19314-1-git-send-email-aarcange@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/mm/pageattr.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 091934e1d0d9..7896f7190fda 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * We are safe now. Check whether the new pgprot is the same: */ old_pte = *kpte; - old_prot = new_prot = req_prot = pte_pgprot(old_pte); + old_prot = req_prot = pte_pgprot(old_pte); pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); @@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL * for the ancient hardware that doesn't support it. */ - if (pgprot_val(new_prot) & _PAGE_PRESENT) - pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; + if (pgprot_val(req_prot) & _PAGE_PRESENT) + pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL; else - pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); + pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); - new_prot = canon_pgprot(new_prot); + req_prot = canon_pgprot(req_prot); /* * old_pte points to the large page base address. 
So we need -- cgit v1.2.3 From 6b2ba1a9129bd98b5268a4efb167c95189b3eabf Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Wed, 13 Feb 2013 19:37:48 +0000 Subject: kvm/ppc/e500: h2g_tlb1_rmap: esel 0 is valid Add one to esel values in h2g_tlb1_rmap, so that "no mapping" can be distinguished from "esel 0". Note that we're not saved by the fact that host esel 0 is reserved for non-KVM use, because KVM host esel numbering is not the raw host numbering (see to_htlb1_esel). Signed-off-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500_mmu_host.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index a222edfb9a9b..35fb80ec1f57 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -511,10 +511,10 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; if (vcpu_e500->h2g_tlb1_rmap[sesel]) { - unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; + unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } - vcpu_e500->h2g_tlb1_rmap[sesel] = esel; + vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; return sesel; } -- cgit v1.2.3 From 66a5fecdccd4f32a22d2d110cf4f002755b520d8 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Wed, 13 Feb 2013 19:37:49 +0000 Subject: kvm/ppc/e500: g2h_tlb1_map: clear old bit before setting new bit It's possible that we're using the same host TLB1 slot to map (a presumably different portion of) the same guest TLB1 entry. Clear the bit in the map before setting it, so that if the esels are the same the bit will remain set. Signed-off-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500_mmu_host.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 35fb80ec1f57..8e72b2124f63 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -507,13 +507,14 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) vcpu_e500->host_tlb1_nv = 0; - vcpu_e500->tlb_refs[1][sesel] = *ref; - vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; if (vcpu_e500->h2g_tlb1_rmap[sesel]) { unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } + + vcpu_e500->tlb_refs[1][sesel] = *ref; + vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; + vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; return sesel; -- cgit v1.2.3 From 4d2be6f7c75e814ee28b007dbf0c26dfcbbe20a1 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Wed, 6 Mar 2013 16:02:49 +0000 Subject: kvm/ppc/e500: eliminate tlb_refs Commit 523f0e5421c12610527c620b983b443f329e3a32 ("KVM: PPC: E500: Explicitly mark shadow maps invalid") began using E500_TLB_VALID for guest TLB1 entries, and skipping invalidations if it's not set. However, when E500_TLB_VALID was set for such entries, it was on a fake local ref, and so the invalidations never happen. gtlb_privs is documented as being only for guest TLB0, though we already violate that with E500_TLB_BITMAP. 
Now that we have MMU notifiers, and thus don't need to actually retain a reference to the mapped pages, get rid of tlb_refs, and use gtlb_privs for E500_TLB_VALID in TLB1. Since we can have more than one host TLB entry for a given tlbe_ref, be careful not to clear existing flags that are relevant to other host TLB entries when preparing a new host TLB entry. Signed-off-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500.h | 24 +++++-------- arch/powerpc/kvm/e500_mmu_host.c | 75 ++++++++++++---------------------------- 2 files changed, 30 insertions(+), 69 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 41cefd43655f..33db48a8ce24 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -26,17 +26,20 @@ #define E500_PID_NUM 3 #define E500_TLB_NUM 2 -#define E500_TLB_VALID 1 -#define E500_TLB_BITMAP 2 +/* entry is mapped somewhere in host TLB */ +#define E500_TLB_VALID (1 << 0) +/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ +#define E500_TLB_BITMAP (1 << 1) +/* TLB1 entry is mapped by host TLB0 */ #define E500_TLB_TLB0 (1 << 2) struct tlbe_ref { - pfn_t pfn; - unsigned int flags; /* E500_TLB_* */ + pfn_t pfn; /* valid only for TLB0, except briefly */ + unsigned int flags; /* E500_TLB_* */ }; struct tlbe_priv { - struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ + struct tlbe_ref ref; }; #ifdef CONFIG_KVM_E500V2 @@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 { unsigned int gtlb_nv[E500_TLB_NUM]; - /* - * information associated with each host TLB entry -- - * TLB1 only for now. If/when guest TLB1 entries can be - * mapped with host TLB0, this will be used for that too. - * - * We don't want to use this for guest TLB0 because then we'd - * have the overhead of doing the translation again even if - * the entry is still in the guest TLB (e.g. we swapped out - * and back, and our host TLB entries got evicted). 
- */ - struct tlbe_ref *tlb_refs[E500_TLB_NUM]; unsigned int host_tlb1_nv; u32 svr; diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 8e72b2124f63..1c6a9d729df4 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; /* Don't bother with unmapped entries */ - if (!(ref->flags & E500_TLB_VALID)) - return; + if (!(ref->flags & E500_TLB_VALID)) { + WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), + "%s: flags %x\n", __func__, ref->flags); + WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); + } if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; @@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, pfn_t pfn) { ref->pfn = pfn; - ref->flags = E500_TLB_VALID; + ref->flags |= E500_TLB_VALID; if (tlbe_is_writable(gtlbe)) kvm_set_pfn_dirty(pfn); @@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) { if (ref->flags & E500_TLB_VALID) { + /* FIXME: don't log bogus pfn for TLB1 */ trace_kvm_booke206_ref_release(ref->pfn, ref->flags); ref->flags = 0; } @@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) { - int tlbsel = 0; - int i; - - for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->gtlb_priv[tlbsel][i].ref; - kvmppc_e500_ref_release(ref); - } -} - -static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int stlbsel = 1; + int tlbsel; int i; - kvmppc_e500_tlbil_all(vcpu_e500); - - for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->tlb_refs[stlbsel][i]; - kvmppc_e500_ref_release(ref); + for (tlbsel = 0; tlbsel <= 1; tlbsel++) { + for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->gtlb_priv[tlbsel][i].ref; + kvmppc_e500_ref_release(ref); + } } - - clear_tlb_privs(vcpu_e500); } void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - clear_tlb_refs(vcpu_e500); + kvmppc_e500_tlbil_all(vcpu_e500); + clear_tlb_privs(vcpu_e500); clear_tlb1_bitmap(vcpu_e500); } @@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); } - /* Drop old ref and setup new one. 
*/ - kvmppc_e500_ref_release(ref); kvmppc_e500_ref_setup(ref, gtlbe, pfn); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, @@ -512,10 +501,10 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } - vcpu_e500->tlb_refs[1][sesel] = *ref; vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; + WARN_ON(!(ref->flags & E500_TLB_VALID)); return sesel; } @@ -527,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int esel) { - struct tlbe_ref ref; + struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; int sesel; int r; - ref.flags = 0; r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, - &ref); + ref); if (r) return r; @@ -545,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, } /* Otherwise map into TLB1 */ - sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel); + sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); return 0; @@ -566,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, case 0: priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; - /* Triggers after clear_tlb_refs or on initial mapping */ + /* Triggers after clear_tlb_privs or on initial mapping */ if (!(priv->ref.flags & E500_TLB_VALID)) { kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); } else { @@ -666,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) host_tlb_params[0].entries / host_tlb_params[0].ways; host_tlb_params[1].sets = 1; - vcpu_e500->tlb_refs[0] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[0]) - goto err; - - vcpu_e500->tlb_refs[1] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[1]) - goto err; - vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * host_tlb_params[1].entries, GFP_KERNEL); if (!vcpu_e500->h2g_tlb1_rmap) - goto err; + return -EINVAL; return 0; - -err: - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); - return -EINVAL; } void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { kfree(vcpu_e500->h2g_tlb1_rmap); - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); } -- cgit v1.2.3 From 7791c8423f1f7f4dad94e753bae67461d5b80be8 Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Wed, 10 Apr 2013 10:59:34 +0200 Subject: x86,efi: Check max_size only if it is non-zero. Some EFI implementations return always a MaximumVariableSize of 0, check against max_size only if it is non-zero. My Intel DQ67SW desktop board has such an implementation. 
Signed-off-by: Richard Weinberger Signed-off-by: Matt Fleming --- arch/x86/platform/efi/efi.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index c89c245eff40..3f96a487aa2a 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -1018,7 +1018,12 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) if (status != EFI_SUCCESS) return status; - if (!storage_size || size > remaining_size || size > max_size || + if (!max_size && remaining_size > size) + printk_once(KERN_ERR FW_BUG "Broken EFI implementation" + " is returning MaxVariableSize=0\n"); + + if (!storage_size || size > remaining_size || + (max_size && size > max_size) || (remaining_size - size) < (storage_size / 2)) return EFI_OUT_OF_RESOURCES; -- cgit v1.2.3 From 18699739b60cb60230153ff5475b2ba92be185f9 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Thu, 11 Apr 2013 15:36:09 +0200 Subject: x86/mm/cpa/selftest: Fix false positive in CPA self test If the pmd is not present, _PAGE_PSE will not be set anymore. Fix the false positive. Reported-by: Ingo Molnar Signed-off-by: Andrea Arcangeli Cc: Stefan Bader Cc: Andy Whitcroft Cc: Mel Gorman Cc: Borislav Petkov Link: http://lkml.kernel.org/r/1365687369-30802-1-git-send-email-aarcange@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/mm/pageattr-test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index b0086567271c..0e38951e65eb 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -68,7 +68,7 @@ static int print_split(struct split_state *s) s->gpg++; i += GPS/PAGE_SIZE; } else if (level == PG_LEVEL_2M) { - if (!(pte_val(*pte) & _PAGE_PSE)) { + if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) { printk(KERN_ERR "%lx level %d but not PSE %Lx\n", addr, level, (u64)pte_val(*pte)); -- cgit v1.2.3 From 26564600c9e88c6572a5e6ef5ae9121907edfb7f Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Thu, 11 Apr 2013 13:59:52 -0400 Subject: x86/mm: Flush lazy MMU when DEBUG_PAGEALLOC is set When CONFIG_DEBUG_PAGEALLOC is set page table updates made by kernel_map_pages() are not made visible (via TLB flush) immediately if lazy MMU is on. In environments that support lazy MMU (e.g. Xen) this may lead to fatal page faults, for example, when zap_pte_range() needs to allocate pages in __tlb_remove_page() -> tlb_next_batch(). 
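The contract being worked around can be sketched like this (illustrative caller, not the patch itself; set_pte_at(), __flush_tlb_all() and arch_flush_lazy_mmu_mode() are the interfaces discussed in these patches): when a paravirt backend has a lazy-MMU section open, the page-table write may only be queued, so making it visible takes both the TLB flush and an explicit flush of the queued updates.

/* Illustrative sketch of why kernel_map_pages() needs the extra flush. */
static void example_repermission_page(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);  /* may sit in the lazy-MMU queue */

        __flush_tlb_all();                /* drops stale TLB entries ... */
        arch_flush_lazy_mmu_mode();       /* ... and pushes out the queued PTE update */
}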
Signed-off-by: Boris Ostrovsky Cc: konrad.wilk@oracle.com Link: http://lkml.kernel.org/r/1365703192-2089-1-git-send-email-boris.ostrovsky@oracle.com Signed-off-by: Ingo Molnar --- arch/x86/mm/pageattr.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 7896f7190fda..fb4e73ec24d8 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable) * but that can deadlock->flush only current cpu: */ __flush_tlb_all(); + + arch_flush_lazy_mmu_mode(); } #ifdef CONFIG_HIBERNATION -- cgit v1.2.3 From 1de14c3c5cbc9bb17e9dcc648cda51c0c85d54b9 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Apr 2013 16:23:54 -0700 Subject: x86-32: Fix possible incomplete TLB invalidate with PAE pagetables This patch attempts to fix: https://bugzilla.kernel.org/show_bug.cgi?id=56461 The symptom is a crash and messages like this: chrome: Corrupted page table at address 34a03000 *pdpt = 0000000000000000 *pde = 0000000000000000 Bad pagetable: 000f [#1] PREEMPT SMP Ingo guesses this got introduced by commit 611ae8e3f520 ("x86/tlb: enable tlb flush range support for x86") since that code started to free unused pagetables. On x86-32 PAE kernels, that new code has the potential to free an entire PMD page and will clear one of the four page-directory-pointer-table (aka pgd_t entries). The hardware aggressively "caches" these top-level entries and invlpg does not actually affect the CPU's copy. If we clear one we *HAVE* to do a full TLB flush, otherwise we might continue using a freed pmd page. (note, we do this properly on the population side in pud_populate()). This patch tracks whenever we clear one of these entries in the 'struct mmu_gather', and ensures that we follow up with a full tlb flush. BTW, I disassembled and checked that: if (tlb->fullmm == 0) and if (!tlb->fullmm && !tlb->need_flush_all) generate essentially the same code, so there should be zero impact there to the !PAE case. Signed-off-by: Dave Hansen Cc: Peter Anvin Cc: Ingo Molnar Cc: Artem S Tashkinov Signed-off-by: Linus Torvalds --- arch/x86/include/asm/tlb.h | 2 +- arch/x86/mm/pgtable.c | 7 +++++++ include/asm-generic/tlb.h | 7 ++++++- mm/memory.c | 1 + 4 files changed, 15 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 4fef20773b8f..c7797307fc2b 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -7,7 +7,7 @@ #define tlb_flush(tlb) \ { \ - if (tlb->fullmm == 0) \ + if (!tlb->fullmm && !tlb->need_flush_all) \ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ else \ flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 193350b51f90..17fda6a8b3c2 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) { paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); + /* + * NOTE! For PAE, any changes to the top page-directory-pointer-table + * entries need a full cr3 reload to flush. 
+ */ +#ifdef CONFIG_X86_PAE + tlb->need_flush_all = 1; +#endif tlb_remove_page(tlb, virt_to_page(pmd)); } diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 25f01d0bc149..b1b1fa6ffffe 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -99,7 +99,12 @@ struct mmu_gather { unsigned int need_flush : 1, /* Did free PTEs */ fast_mode : 1; /* No batching */ - unsigned int fullmm; + /* we are in the middle of an operation to clear + * a full mm and can make some optimizations */ + unsigned int fullmm : 1, + /* we have performed an operation which + * requires a complete flush of the tlb */ + need_flush_all : 1; struct mmu_gather_batch *active; struct mmu_gather_batch local; diff --git a/mm/memory.c b/mm/memory.c index 494526ae024a..13cbc420fead 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) tlb->mm = mm; tlb->fullmm = fullmm; + tlb->need_flush_all = 0; tlb->start = -1UL; tlb->end = 0; tlb->need_flush = 0; -- cgit v1.2.3 From 05e38e5d5d437c762c79428ae8434632a8ca2c5e Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Mon, 15 Apr 2013 11:44:14 +1000 Subject: powerpc: Fix audit crash due to save/restore PPR changes The current mainline crashes when hitting userspace with the following: kernel BUG at kernel/auditsc.c:1769! cpu 0x1: Vector: 700 (Program Check) at [c000000023883a60] pc: c0000000001047a8: .__audit_syscall_entry+0x38/0x130 lr: c00000000000ed64: .do_syscall_trace_enter+0xc4/0x270 sp: c000000023883ce0 msr: 8000000000029032 current = 0xc000000023800000 paca = 0xc00000000f080380 softe: 0 irq_happened: 0x01 pid = 1629, comm = start_udev kernel BUG at kernel/auditsc.c:1769! enter ? for help [c000000023883d80] c00000000000ed64 .do_syscall_trace_enter+0xc4/0x270 [c000000023883e30] c000000000009b08 syscall_dotrace+0xc/0x38 --- Exception: c00 (System Call) at 0000008010ec50dc Bisecting found the following patch caused it: commit 44e9309f1f357794b7ae93d5f3e3e6f11d2b8a7f Author: Haren Myneni powerpc: Implement PPR save/restore It was found this patch corrupted r9 when calling SET_DEFAULT_THREAD_PPR() Using r10 as a scratch register instead of r9 solved the problem. Signed-off-by: Alistair Popple Acked-by: Michael Neuling Signed-off-by: Stephen Rothwell --- arch/powerpc/kernel/entry_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 256c5bf0adb7..3acb1a076d3f 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -304,7 +304,7 @@ syscall_exit_work: subi r12,r12,TI_FLAGS 4: /* Anything else left to do? */ - SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */ + SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) beq .ret_from_except_lite -- cgit v1.2.3 From d8b92292408831d86ff7b781e66bf79301934b99 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 9 Apr 2013 22:31:24 +0000 Subject: powerpc: add a missing label in resume_kernel A label 0 was missed in the patch a9c4e541 (powerpc/kprobe: Complete kprobe and migrate exception frame). This will cause the kernel branch to an undetermined address if there really has a conflict when updating the thread flags. 
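For context, a stand-alone sketch of the ldarx/stdcx. retry idiom this label belongs to (invented helper name, 64-bit powerpc only): the failed store-conditional must branch back to the ldarx that took the reservation so the whole read-modify-write is retried; without the "0:" on the ldarx, "bne- 0b" resolves to whatever earlier "0:" happens to precede it in entry_64.S.

/* Sketch of the idiom the missing label breaks (invented name). */
static inline void clear_bits_atomic(unsigned long *word, unsigned long mask)
{
        unsigned long tmp;

        __asm__ __volatile__(
"0:     ldarx   %0,0,%2\n"      /* load and take the reservation */
"       andc    %0,%0,%3\n"     /* clear the requested bits */
"       stdcx.  %0,0,%2\n"      /* store only if the reservation still holds */
"       bne-    0b\n"           /* reservation lost -> retry from the ldarx */
        : "=&r" (tmp), "+m" (*word)
        : "r" (word), "r" (mask)
        : "cc", "memory");
}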
Signed-off-by: Kevin Hao Cc: stable@vger.kernel.org Acked-By: Tiejun Chen Signed-off-by: Stephen Rothwell --- arch/powerpc/kernel/entry_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 3acb1a076d3f..04d69c4a5ac2 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -657,7 +657,7 @@ resume_kernel: /* Clear _TIF_EMULATE_STACK_STORE flag */ lis r11,_TIF_EMULATE_STACK_STORE@h addi r5,r9,TI_FLAGS - ldarx r4,0,r5 +0: ldarx r4,0,r5 andc r4,r4,r11 stdcx. r4,0,r5 bne- 0b -- cgit v1.2.3 From cc5a080c5d40c36089bb08a8a16fa3fc7047fe0f Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 15 Apr 2013 13:09:46 -0700 Subject: efi: Pass boot services variable info to runtime code EFI variables can be flagged as being accessible only within boot services. This makes it awkward for us to figure out how much space they use at runtime. In theory we could figure this out by simply comparing the results from QueryVariableInfo() to the space used by all of our variables, but that fails if the platform doesn't garbage collect on every boot. Thankfully, calling QueryVariableInfo() while still inside boot services gives a more reliable answer. This patch passes that information from the EFI boot stub up to the efi platform code. Signed-off-by: Matthew Garrett Signed-off-by: Matt Fleming --- arch/x86/boot/compressed/eboot.c | 47 +++++++++++++++++++++++++++++++++++ arch/x86/include/asm/efi.h | 7 ++++++ arch/x86/include/uapi/asm/bootparam.h | 1 + arch/x86/platform/efi/efi.c | 21 ++++++++++++++++ 4 files changed, 76 insertions(+) (limited to 'arch') diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c205035a6b96..8615f7581820 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) *size = len; } +static efi_status_t setup_efi_vars(struct boot_params *params) +{ + struct setup_data *data; + struct efi_var_bootdata *efidata; + u64 store_size, remaining_size, var_size; + efi_status_t status; + + if (!sys_table->runtime->query_variable_info) + return EFI_UNSUPPORTED; + + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + status = efi_call_phys4(sys_table->runtime->query_variable_info, + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, &store_size, + &remaining_size, &var_size); + + if (status != EFI_SUCCESS) + return status; + + status = efi_call_phys3(sys_table->boottime->allocate_pool, + EFI_LOADER_DATA, sizeof(*efidata), &efidata); + + if (status != EFI_SUCCESS) + return status; + + efidata->data.type = SETUP_EFI_VARS; + efidata->data.len = sizeof(struct efi_var_bootdata) - + sizeof(struct setup_data); + efidata->data.next = 0; + efidata->store_size = store_size; + efidata->remaining_size = remaining_size; + efidata->max_var_size = var_size; + + if (data) + data->next = (unsigned long)efidata; + else + params->hdr.setup_data = (unsigned long)efidata; + +} + static efi_status_t setup_efi_pci(struct boot_params *params) { efi_pci_io_protocol *pci; @@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table, setup_graphics(boot_params); + setup_efi_vars(boot_params); + setup_efi_pci(boot_params); status = 
efi_call_phys3(sys_table->boottime->allocate_pool, diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 60c89f30c727..2fb5d5884e23 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void); extern void efi_unmap_memmap(void); extern void efi_memory_uc(u64 addr, unsigned long size); +struct efi_var_bootdata { + struct setup_data data; + u64 store_size; + u64 remaining_size; + u64 max_var_size; +}; + #ifdef CONFIG_EFI static inline bool efi_is_native(void) diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index c15ddaf90710..08744242b8d2 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -6,6 +6,7 @@ #define SETUP_E820_EXT 1 #define SETUP_DTB 2 #define SETUP_PCI 3 +#define SETUP_EFI_VARS 4 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 3f96a487aa2a..977d1ce7e173 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -69,6 +69,10 @@ struct efi_memory_map memmap; static struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; +static u64 efi_var_store_size; +static u64 efi_var_remaining_size; +static u64 efi_var_max_var_size; + unsigned long x86_efi_facility; /* @@ -682,6 +686,9 @@ void __init efi_init(void) char vendor[100] = "unknown"; int i = 0; void *tmp; + struct setup_data *data; + struct efi_var_bootdata *efi_var_data; + u64 pa_data; #ifdef CONFIG_X86_32 if (boot_params.efi_info.efi_systab_hi || @@ -699,6 +706,20 @@ void __init efi_init(void) if (efi_systab_init(efi_phys.systab)) return; + pa_data = boot_params.hdr.setup_data; + while (pa_data) { + data = early_ioremap(pa_data, sizeof(*efi_var_data)); + if (data->type == SETUP_EFI_VARS) { + efi_var_data = (struct efi_var_bootdata *)data; + + efi_var_store_size = efi_var_data->store_size; + efi_var_remaining_size = efi_var_data->remaining_size; + efi_var_max_var_size = efi_var_data->max_var_size; + } + pa_data = data->next; + early_iounmap(data, sizeof(*efi_var_data)); + } + set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); /* -- cgit v1.2.3 From 31ff2f20d9003e74991d135f56e503fe776c127c Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 15 Apr 2013 13:09:47 -0700 Subject: efi: Distinguish between "remaining space" and actually used space EFI implementations distinguish between space that is actively used by a variable and space that merely hasn't been garbage collected yet. Space that hasn't yet been garbage collected isn't available for use and so isn't counted in the remaining_space field returned by QueryVariableInfo(). Combined with commit 68d9298 this can cause problems. Some implementations don't garbage collect until the remaining space is smaller than the maximum variable size, and as a result check_var_size() will always fail once more than 50% of the variable store has been used even if most of that space is marked as available for garbage collection. The user is unable to create new variables, and deleting variables doesn't increase the remaining space. The problem that 68d9298 was attempting to avoid was one where certain platforms fail if the actively used space is greater than 50% of the available storage space. We should be able to calculate that by simply summing the size of each available variable and subtracting that from the total storage space. 
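As a rough sketch of that accounting (illustrative only; the actual change below folds the bookkeeping into the runtime service wrappers rather than walking the store in one pass, and additionally filters on EFI_VARIABLE_NON_VOLATILE):

/*
 * Hedged sketch: sum the space actively occupied by variables.
 * Needs <linux/efi.h> and <linux/ucs2_string.h>; VAR_METADATA_SIZE is the
 * per-variable overhead estimate introduced by this patch.
 */
static u64 efi_sum_active_space(void)
{
        efi_char16_t name[512] = { 0 };
        efi_guid_t vendor;
        unsigned long name_size, data_size;
        u64 active = 0;

        for (;;) {
                name_size = sizeof(name);
                if (efi.get_next_variable(&name_size, name, &vendor) != EFI_SUCCESS)
                        break;

                /* probe the data size without copying the data */
                data_size = 0;
                if (efi.get_variable(name, &vendor, NULL, &data_size, NULL) !=
                    EFI_BUFFER_TOO_SMALL)
                        continue;

                active += data_size + ucs2_strsize(name, sizeof(name)) +
                          VAR_METADATA_SIZE;
        }

        return active;
}
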
With luck this will fix the problem described in https://bugzilla.kernel.org/show_bug.cgi?id=55471 without permitting damage to occur to the machines 68d9298 was attempting to fix. Signed-off-by: Matthew Garrett Signed-off-by: Matt Fleming --- arch/x86/platform/efi/efi.c | 106 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 100 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 977d1ce7e173..4959e3f89d74 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -51,6 +52,13 @@ #define EFI_DEBUG 1 +/* + * There's some additional metadata associated with each + * variable. Intel's reference implementation is 60 bytes - bump that + * to account for potential alignment constraints + */ +#define VAR_METADATA_SIZE 64 + struct efi __read_mostly efi = { .mps = EFI_INVALID_TABLE_ADDR, .acpi = EFI_INVALID_TABLE_ADDR, @@ -72,6 +80,9 @@ static efi_system_table_t efi_systab __initdata; static u64 efi_var_store_size; static u64 efi_var_remaining_size; static u64 efi_var_max_var_size; +static u64 boot_used_size; +static u64 boot_var_size; +static u64 active_size; unsigned long x86_efi_facility; @@ -166,8 +177,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) { - return efi_call_virt3(get_next_variable, - name_size, name, vendor); + efi_status_t status; + static bool finished = false; + static u64 var_size; + + status = efi_call_virt3(get_next_variable, + name_size, name, vendor); + + if (status == EFI_NOT_FOUND) { + finished = true; + if (var_size < boot_used_size) { + boot_var_size = boot_used_size - var_size; + active_size += boot_var_size; + } else { + printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n"); + } + } + + if (boot_used_size && !finished) { + unsigned long size; + u32 attr; + efi_status_t s; + void *tmp; + + s = virt_efi_get_variable(name, vendor, &attr, &size, NULL); + + if (s != EFI_BUFFER_TOO_SMALL || !size) + return status; + + tmp = kmalloc(size, GFP_ATOMIC); + + if (!tmp) + return status; + + s = virt_efi_get_variable(name, vendor, &attr, &size, tmp); + + if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) { + var_size += size; + var_size += ucs2_strsize(name, 1024); + active_size += size; + active_size += VAR_METADATA_SIZE; + active_size += ucs2_strsize(name, 1024); + } + + kfree(tmp); + } + + return status; } static efi_status_t virt_efi_set_variable(efi_char16_t *name, @@ -176,9 +232,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, unsigned long data_size, void *data) { - return efi_call_virt5(set_variable, - name, vendor, attr, - data_size, data); + efi_status_t status; + u32 orig_attr = 0; + unsigned long orig_size = 0; + + status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size, + NULL); + + if (status != EFI_BUFFER_TOO_SMALL) + orig_size = 0; + + status = efi_call_virt5(set_variable, + name, vendor, attr, + data_size, data); + + if (status == EFI_SUCCESS) { + if (orig_size) { + active_size -= orig_size; + active_size -= ucs2_strsize(name, 1024); + active_size -= VAR_METADATA_SIZE; + } + if (data_size) { + active_size += data_size; + active_size += ucs2_strsize(name, 1024); + active_size += VAR_METADATA_SIZE; + } + } + + return status; } static efi_status_t virt_efi_query_variable_info(u32 attr, @@ -720,6 +801,8 @@ void __init efi_init(void) early_iounmap(data, 
sizeof(*efi_var_data)); } + boot_used_size = efi_var_store_size - efi_var_remaining_size; + set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); /* @@ -1042,10 +1125,21 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) if (!max_size && remaining_size > size) printk_once(KERN_ERR FW_BUG "Broken EFI implementation" " is returning MaxVariableSize=0\n"); + /* + * Some firmware implementations refuse to boot if there's insufficient + * space in the variable store. We account for that by refusing the + * write if permitting it would reduce the available space to under + * 50%. However, some firmware won't reclaim variable space until + * after the used (not merely the actively used) space drops below + * a threshold. We can approximate that case with the value calculated + * above. If both the firmware and our calculations indicate that the + * available space would drop below 50%, refuse the write. + */ if (!storage_size || size > remaining_size || (max_size && size > max_size) || - (remaining_size - size) < (storage_size / 2)) + ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && + (remaining_size - size < storage_size / 2))) return EFI_OUT_OF_RESOURCES; return EFI_SUCCESS; -- cgit v1.2.3 From f1923820c447e986a9da0fc6bf60c1dccdf0408e Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 16 Apr 2013 13:51:43 +0200 Subject: perf/x86: Fix offcore_rsp valid mask for SNB/IVB The valid mask for both offcore_response_0 and offcore_response_1 was wrong for SNB/SNB-EP, IVB/IVB-EP. It was possible to write to reserved bit and cause a GP fault crashing the kernel. This patch fixes the problem by correctly marking the reserved bits in the valid mask for all the processors mentioned above. A distinction between desktop and server parts is introduced because bits 24-30 are only available on the server parts. This version of the patch is just a rebase to perf/urgent tree and should apply to older kernels as well. 
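For context, the corrected mask feeds a reserved-bit guard in the generic x86 perf code that behaves roughly like the sketch below (simplified, names illustrative); this is what turns a would-be #GP into a clean error return:

/* Simplified sketch of the guard the valid mask enables. */
static int check_offcore_rsp(u64 requested, u64 valid_mask)
{
        if (requested & ~valid_mask)    /* reserved bits set?             */
                return -EINVAL;         /* reject instead of #GP on wrmsr */
        return 0;
}
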
Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: jolsa@redhat.com Cc: gregkh@linuxfoundation.org Cc: security@kernel.org Cc: ak@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index dab7580c47ae..cc45deb791b0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = }; static struct extra_reg intel_snb_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), + EVENT_EXTRA_END +}; + +static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), EVENT_EXTRA_END }; @@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - x86_pmu.extra_regs = intel_snb_extra_regs; + if (boot_cpu_data.x86_model == 45) + x86_pmu.extra_regs = intel_snbep_extra_regs; + else + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; @@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_ivb_event_constraints; x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - x86_pmu.extra_regs = intel_snb_extra_regs; + if (boot_cpu_data.x86_model == 62) + x86_pmu.extra_regs = intel_snbep_extra_regs; + else + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; -- cgit v1.2.3 From f6ce5002629e08eaa777ced3872a98f76845143e Mon Sep 17 00:00:00 2001 From: Sergey Vlasov Date: Tue, 16 Apr 2013 18:31:08 +0400 Subject: x86/Kconfig: Make EFI select UCS2_STRING The commit "efi: Distinguish between "remaining space" and actually used space" added usage of ucs2_*() functions to arch/x86/platform/efi/efi.c, but the only thing which selected UCS2_STRING was EFI_VARS, which is technically optional and can be built as a module. Signed-off-by: Sergey Vlasov Signed-off-by: Matt Fleming --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a4f24f5b1218..01af8535f58e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1549,6 +1549,7 @@ config X86_SMAP config EFI bool "EFI runtime service support" depends on ACPI + select UCS2_STRING ---help--- This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). 
-- cgit v1.2.3 From 3668011d4ad556224f7c012c1e870a6eaa0e59da Mon Sep 17 00:00:00 2001 From: Sergey Vlasov Date: Tue, 16 Apr 2013 18:31:09 +0400 Subject: efi: Export efi_query_variable_store() for efivars.ko Fixes build with CONFIG_EFI_VARS=m which was broken after the commit "x86, efivars: firmware bug workarounds should be in platform code". Signed-off-by: Sergey Vlasov Signed-off-by: Matt Fleming --- arch/x86/platform/efi/efi.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 4959e3f89d74..4f364c7c6111 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -1144,3 +1144,4 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) return EFI_SUCCESS; } +EXPORT_SYMBOL_GPL(efi_query_variable_store); -- cgit v1.2.3 From ca46e10fb239d4646cb19620695473f252a6ffc7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 3 Apr 2013 10:43:13 +0100 Subject: ARM: KVM: fix KVM_CAP_ARM_SET_DEVICE_ADDR reporting Commit 3401d54696f9 (KVM: ARM: Introduce KVM_ARM_SET_DEVICE_ADDR ioctl) added support for the KVM_CAP_ARM_SET_DEVICE_ADDR capability, but failed to add a break in the relevant case statement, returning the number of CPUs instead. Luckilly enough, the CONFIG_NR_CPUS=0 patch hasn't been merged yet (https://lkml.org/lkml/diff/2012/3/31/131/1), so the bug wasn't noticed. Just give it a break! Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/kvm/arm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 5a936988eb24..c1fe498983ac 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext) break; case KVM_CAP_ARM_SET_DEVICE_ADDR: r = 1; + break; case KVM_CAP_NR_VCPUS: r = num_online_cpus(); break; -- cgit v1.2.3 From 865499ea90d399e0682bcce3ae7af24277633699 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 12 Apr 2013 14:00:16 +0100 Subject: ARM: KVM: fix L_PTE_S2_RDWR to actually be Read/Write Looks like our L_PTE_S2_RDWR definition is slightly wrong, and is actually write only (see ARM ARM Table B3-9, Stage 2 control of access permissions). Didn't make a difference for normal pages, as we OR the flags together, but I'm still wondering how it worked for Stage-2 mapped devices, such as the GIC. Brown paper bag time, again. Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/pgtable-3level.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 6ef8afd1b64c..86b8fe398b95 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -111,7 +111,7 @@ #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ /* * Hyp-mode PL2 PTE definitions for LPAE. 
-- cgit v1.2.3 From 8c58bf3eec3b8fc8162fe557e9361891c20758f2 Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Wed, 17 Apr 2013 01:00:53 +0200 Subject: x86,efi: Implement efi_no_storage_paranoia parameter Using this parameter one can disable the storage_size/2 check if he is really sure that the UEFI does sane gc and fulfills the spec. This parameter is useful if a devices uses more than 50% of the storage by default. The Intel DQSW67 desktop board is such a sucker for exmaple. Signed-off-by: Richard Weinberger Signed-off-by: Matt Fleming --- Documentation/kernel-parameters.txt | 6 ++++++ arch/x86/platform/efi/efi.c | 14 +++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 4609e81dbc37..d1cc3a9fa14f 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -788,6 +788,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. edd= [EDD] Format: {"off" | "on" | "skip[mbr]"} + efi_no_storage_paranoia [EFI; X86] + Using this parameter you can use more than 50% of + your efi variable storage. Use this parameter only if + you are really sure that your UEFI does sane gc and + fulfills the spec otherwise your board may brick. + eisa_irq_edge= [PARISC,HW] See header of drivers/parisc/eisa.c. diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 4f364c7c6111..e4a86a677ce1 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -113,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg) } early_param("add_efi_memmap", setup_add_efi_memmap); +static bool efi_no_storage_paranoia; + +static int __init setup_storage_paranoia(char *arg) +{ + efi_no_storage_paranoia = true; + return 0; +} +early_param("efi_no_storage_paranoia", setup_storage_paranoia); + static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { @@ -1137,7 +1146,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) */ if (!storage_size || size > remaining_size || - (max_size && size > max_size) || + (max_size && size > max_size)) + return EFI_OUT_OF_RESOURCES; + + if (!efi_no_storage_paranoia && ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && (remaining_size - size < storage_size / 2))) return EFI_OUT_OF_RESOURCES; -- cgit v1.2.3 From 4f2e29031e6c67802e7370292dd050fd62f337ee Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 17 Apr 2013 08:46:19 -0700 Subject: s390: move dummy io_remap_pfn_range() to asm/pgtable.h Commit b4cbb197c7e7 ("vm: add vm_iomap_memory() helper function") added a helper function wrapper around io_remap_pfn_range(), and every other architecture defined it in . The s390 choice of may make sense, but is not very convenient for this case, and gratuitous differences like that cause unexpected errors like this: mm/memory.c: In function 'vm_iomap_memory': mm/memory.c:2439:2: error: implicit declaration of function 'io_remap_pfn_range' [-Werror=implicit-function-declaration] Glory be the kbuild test robot who noticed this, bisected it, and reported it to the guilty parties (ie me). 
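For reference, vm_iomap_memory() is typically called from a driver's mmap handler roughly as sketched here (hypothetical driver, illustration only); since the helper is a wrapper around io_remap_pfn_range(), every architecture needs that macro visible from asm/pgtable.h:

/*
 * Hypothetical driver: mydev, mmio_phys and mmio_len are made-up names.
 * vm_iomap_memory() validates the pfn/size and then calls
 * io_remap_pfn_range(), which is what failed to build on s390.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydev *dev = file->private_data;

        return vm_iomap_memory(vma, dev->mmio_phys, dev->mmio_len);
}
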
Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Linus Torvalds --- arch/s390/include/asm/io.h | 4 ---- arch/s390/include/asm/pgtable.h | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 27cb32185ce1..379d96e2105e 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr); #define ioremap_nocache(addr, size) ioremap(addr, size) #define ioremap_wc ioremap_nocache -/* TODO: s390 cannot support io_remap_pfn_range... */ -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - static inline void __iomem *ioremap(unsigned long offset, unsigned long size) { return (void __iomem *) offset; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 4a5443118cfb..3cb47cf02530 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -57,6 +57,10 @@ extern unsigned long zero_page_mask; (((unsigned long)(vaddr)) &zero_page_mask)))) #define __HAVE_COLOR_ZERO_PAGE +/* TODO: s390 cannot support io_remap_pfn_range... */ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #endif /* !__ASSEMBLY__ */ /* -- cgit v1.2.3 From f5d6a1441a5045824f36ff7c6b6bbae0373472a6 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Wed, 3 Apr 2013 22:28:41 +0100 Subject: ARM: 7692/1: iop3xx: move IOP3XX_PERIPHERAL_VIRT_BASE Currently IOP3XX_PERIPHERAL_VIRT_BASE conflicts with PCI_IO_VIRT_BASE: address size PCI_IO_VIRT_BASE 0xfee00000 0x200000 IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 0x2000 Fix by moving IOP3XX_PERIPHERAL_VIRT_BASE below PCI_IO_VIRT_BASE. The patch fixes the following kernel panic with 3.9-rc1 on iop3xx boards: [ 0.000000] Booting Linux on physical CPU 0x0 [ 0.000000] Initializing cgroup subsys cpu [ 0.000000] Linux version 3.9.0-rc1-iop32x (aaro@blackmetal) (gcc version 4.7.2 (GCC) ) #20 PREEMPT Tue Mar 5 16:44:36 EET 2013 [ 0.000000] bootconsole [earlycon0] enabled [ 0.000000] ------------[ cut here ]------------ [ 0.000000] kernel BUG at mm/vmalloc.c:1145! 
[ 0.000000] Internal error: Oops - BUG: 0 [#1] PREEMPT ARM [ 0.000000] Modules linked in: [ 0.000000] CPU: 0 Not tainted (3.9.0-rc1-iop32x #20) [ 0.000000] PC is at vm_area_add_early+0x4c/0x88 [ 0.000000] LR is at add_static_vm_early+0x14/0x68 [ 0.000000] pc : [] lr : [] psr: 800000d3 [ 0.000000] sp : c03ffee4 ip : dfffdf88 fp : c03ffef4 [ 0.000000] r10: 00000002 r9 : 000000cf r8 : 00000653 [ 0.000000] r7 : c040eca8 r6 : c03e2408 r5 : dfffdf60 r4 : 00200000 [ 0.000000] r3 : dfffdfd8 r2 : feffe000 r1 : ff000000 r0 : dfffdf60 [ 0.000000] Flags: Nzcv IRQs off FIQs off Mode SVC_32 ISA ARM Segment kernel [ 0.000000] Control: 0000397f Table: a0004000 DAC: 00000017 [ 0.000000] Process swapper (pid: 0, stack limit = 0xc03fe1b8) [ 0.000000] Stack: (0xc03ffee4 to 0xc0400000) [ 0.000000] fee0: 00200000 c03fff0c c03ffef8 c03e1c40 c03e7468 00200000 fee00000 [ 0.000000] ff00: c03fff2c c03fff10 c03e23e4 c03e1c38 feffe000 c0408ee4 ff000000 c0408f04 [ 0.000000] ff20: c03fff3c c03fff30 c03e2434 c03e23b4 c03fff84 c03fff40 c03e2c94 c03e2414 [ 0.000000] ff40: c03f8878 c03f6410 ffff0000 000bffff 00001000 00000008 c03fff84 c03f6410 [ 0.000000] ff60: c04227e8 c03fffd4 a0008000 c03f8878 69052e30 c02f96eb c03fffbc c03fff88 [ 0.000000] ff80: c03e044c c03e268c 00000000 0000397f c0385130 00000001 ffffffff c03f8874 [ 0.000000] ffa0: dfffffff a0004000 69052e30 a03f61a0 c03ffff4 c03fffc0 c03dd5cc c03e0184 [ 0.000000] ffc0: 00000000 00000000 00000000 00000000 00000000 c03f8878 0000397d c040601c [ 0.000000] ffe0: c03f8874 c0408674 00000000 c03ffff8 a0008040 c03dd558 00000000 00000000 [ 0.000000] Backtrace: [ 0.000000] [] (vm_area_add_early+0x0/0x88) from [] (add_static_vm_early+0x14/0x68) Tested-by: Mikael Pettersson Signed-off-by: Aaro Koskinen Signed-off-by: Russell King --- arch/arm/include/asm/hardware/iop3xx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 02fe2fbe2477..ed94b1a366ae 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX processor registers */ #define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000 -#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 +#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000 #define IOP3XX_PERIPHERAL_SIZE 0x00002000 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\ IOP3XX_PERIPHERAL_SIZE - 1) -- cgit v1.2.3 From de40614e92bf1b0308d953387b0cb9d3a5710186 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Fri, 5 Apr 2013 03:16:51 +0100 Subject: ARM: 7694/1: ARM, TCM: initialize TCM in paging_init(), instead of setup_arch() tcm_init() call iotable_init() and it use early_alloc variants which do memblock allocation. Directly using memblock allocation after initializing bootmem should not permitted, because bootmem can't know where are additinally reserved. So move tcm_init() to a safe place before initalizing bootmem. 
(On the U300) Tested-by: Linus Walleij Signed-off-by: Joonsoo Kim Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 3 --- arch/arm/kernel/tcm.c | 1 - arch/arm/kernel/tcm.h | 17 ----------------- arch/arm/mm/mmu.c | 2 ++ arch/arm/mm/tcm.h | 17 +++++++++++++++++ 5 files changed, 19 insertions(+), 21 deletions(-) delete mode 100644 arch/arm/kernel/tcm.h create mode 100644 arch/arm/mm/tcm.h (limited to 'arch') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index d343a6c3a6d1..234e339196c0 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -56,7 +56,6 @@ #include #include "atags.h" -#include "tcm.h" #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE) @@ -798,8 +797,6 @@ void __init setup_arch(char **cmdline_p) reserve_crashkernel(); - tcm_init(); - #ifdef CONFIG_MULTI_IRQ_HANDLER handle_arch_irq = mdesc->handle_irq; #endif diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 30ae6bb4a310..f50f19e5c138 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -17,7 +17,6 @@ #include #include #include -#include "tcm.h" static struct gen_pool *tcm_pool; static bool dtcm_present; diff --git a/arch/arm/kernel/tcm.h b/arch/arm/kernel/tcm.h deleted file mode 100644 index 8015ad434a40..000000000000 --- a/arch/arm/kernel/tcm.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (C) 2008-2009 ST-Ericsson AB - * License terms: GNU General Public License (GPL) version 2 - * TCM memory handling for ARM systems - * - * Author: Linus Walleij - * Author: Rickard Andersson - */ - -#ifdef CONFIG_HAVE_TCM -void __init tcm_init(void); -#else -/* No TCM support, just blank inlines to be optimized out */ -inline void tcm_init(void) -{ -} -#endif diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 78978945492a..a84ff763ac39 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -34,6 +34,7 @@ #include #include "mm.h" +#include "tcm.h" /* * empty_zero_page is a special page that is used for @@ -1277,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc) dma_contiguous_remap(); devicemaps_init(mdesc); kmap_init(); + tcm_init(); top_pmd = pmd_off_k(0xffff0000); diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h new file mode 100644 index 000000000000..8015ad434a40 --- /dev/null +++ b/arch/arm/mm/tcm.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2008-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * TCM memory handling for ARM systems + * + * Author: Linus Walleij + * Author: Rickard Andersson + */ + +#ifdef CONFIG_HAVE_TCM +void __init tcm_init(void); +#else +/* No TCM support, just blank inlines to be optimized out */ +inline void tcm_init(void) +{ +} +#endif -- cgit v1.2.3 From cd272d1ea71583170e95dde02c76166c7f9017e6 Mon Sep 17 00:00:00 2001 From: Illia Ragozin Date: Wed, 10 Apr 2013 19:43:34 +0100 Subject: ARM: 7696/1: Fix kexec by setting outer_cache.inv_all for Feroceon On Feroceon the L2 cache becomes non-coherent with the CPU when the L1 caches are disabled. Thus the L2 needs to be invalidated after both L1 caches are disabled. On kexec before the starting the code for relocation the kernel, the L1 caches are disabled in cpu_froc_fin (cpu_v7_proc_fin for Feroceon), but after L2 cache is never invalidated, because inv_all is not set in cache-feroceon-l2.c. So kernel relocation and decompression may has (and usually has) errors. Setting the function enables L2 invalidation and fixes the issue. 
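Sketched in C, the shutdown ordering this change relies on looks roughly like the following (simplified; not the literal kexec code path):

/* Simplified sketch of the ordering the fix completes. */
static void kexec_cache_shutdown(void)
{
        cpu_proc_fin();         /* disable the L1 caches                        */
        outer_inv_all();        /* with inv_all now set, this really invalidates
                                 * the Feroceon L2 before relocation runs       */
}
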
Cc: Signed-off-by: Illia Ragozin Acked-by: Jason Cooper Signed-off-by: Russell King --- arch/arm/mm/cache-feroceon-l2.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index dd3d59122cc3..48bc3c0a87ce 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override) outer_cache.inv_range = feroceon_l2_inv_range; outer_cache.clean_range = feroceon_l2_clean_range; outer_cache.flush_range = feroceon_l2_flush_range; + outer_cache.inv_all = l2_inv_all; enable_l2(); -- cgit v1.2.3 From 50acff3c1f9ee9753684e676929b82926f15966c Mon Sep 17 00:00:00 2001 From: Bastian Hecht Date: Fri, 12 Apr 2013 19:03:50 +0100 Subject: ARM: 7697/1: hw_breakpoint: do not use __cpuinitdata for dbg_cpu_pm_nb We must not declare dbg_cpu_pm_nb as __cpuinitdata as we need it after system initialization for Suspend and CPUIdle. This was done in commit 9a6eb310eaa5 ("ARM: hw_breakpoint: Debug powerdown support for self-hosted debug"). Cc: stable@vger.kernel.org Cc: Dietmar Eggemann Signed-off-by: Bastian Hecht Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/hw_breakpoint.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 5dc1aa6f0f7d..1fd749ee4a1b 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { +static struct notifier_block dbg_cpu_pm_nb = { .notifier_call = dbg_cpu_pm_notify, }; -- cgit v1.2.3 From cb2d8b342aa084d1f3ac29966245dec9163677fb Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 12 Apr 2013 19:04:19 +0100 Subject: ARM: 7698/1: perf: fix group validation when using enable_on_exec Events may be created with attr->disabled == 1 and attr->enable_on_exec == 1, which confuses the group validation code because events with the PERF_EVENT_STATE_OFF are not considered candidates for scheduling, which may lead to failure at group scheduling time. This patch fixes the validation check for ARM, so that events in the OFF state are still considered when enable_on_exec is true. 
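For context, the kind of event setup that trips this looks roughly like the following user-space sketch (illustration only; error handling omitted):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a counter that is created OFF but armed at the next exec(). */
static int open_counter_for_exec(int group_fd)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type           = PERF_TYPE_HARDWARE;
        attr.size           = sizeof(attr);
        attr.config         = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled       = 1;        /* created in the OFF state...         */
        attr.enable_on_exec = 1;        /* ...but starts counting after exec() */

        return syscall(__NR_perf_event_open, &attr, 0 /* this task */,
                       -1 /* any cpu */, group_fd, 0);
}
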
Cc: stable@vger.kernel.org Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Reported-by: Sudeep KarkadaNagesha Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/perf_event.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 146157dfe27c..8c3094d0f7b7 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events, struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu *leader_pmu = event->group_leader->pmu; - if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; return armpmu->get_event_idx(hw_events, event) >= 0; -- cgit v1.2.3 From c729de8fcea37a1c444e81857eace12494c804a9 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 15 Apr 2013 22:23:45 -0700 Subject: x86, kdump: Set crashkernel_low automatically Chao said that kdump does does work well on his system on 3.8 without extra parameter, even iommu does not work with kdump. And now have to append crashkernel_low=Y in first kernel to make kdump work. We have now modified crashkernel=X to allocate memory beyong 4G (if available) and do not allocate low range for crashkernel if the user does not specify that with crashkernel_low=Y. This causes regression if iommu is not enabled. Without iommu, swiotlb needs to be setup in first 4G and there is no low memory available to second kernel. Set crashkernel_low automatically if the user does not specify that. For system that does support IOMMU with kdump properly, user could specify crashkernel_low=0 to save that 72M low ram. -v3: add swiotlb_size() according to Konrad. -v4: add comments what 8M is for according to hpa. also update more crashkernel_low= in kernel-parameters.txt -v5: update changelog according to Vivek. -v6: Change description about swiotlb referring according to HATAYAMA. Reported-by: WANG Chao Tested-by: WANG Chao Signed-off-by: Yinghai Lu Link: http://lkml.kernel.org/r/1366089828-19692-2-git-send-email-yinghai@kernel.org Acked-by: Vivek Goyal Signed-off-by: H. Peter Anvin --- Documentation/kernel-parameters.txt | 14 +++++++++++--- arch/x86/kernel/setup.c | 21 ++++++++++++++++++--- include/linux/swiotlb.h | 1 + lib/swiotlb.c | 19 +++++++++++++++---- 4 files changed, 45 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 4609e81dbc37..cff672da2486 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -596,9 +596,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. is selected automatically. Check Documentation/kdump/kdump.txt for further details. - crashkernel_low=size[KMG] - [KNL, x86] parts under 4G. - crashkernel=range1:size1[,range2:size2,...][@offset] [KNL] Same as above, but depends on the memory in the running system. The syntax of range is @@ -606,6 +603,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted. a memory unit (amount[KMG]). See also Documentation/kdump/kdump.txt for an example. + crashkernel_low=size[KMG] + [KNL, x86_64] range under 4G. 
When crashkernel= is + passed, kernel allocate physical memory region + above 4G, that cause second kernel crash on system + that require some amount of low memory, e.g. swiotlb + requires at least 64M+32K low memory. Kernel would + try to allocate 72M below 4G automatically. + This one let user to specify own low range under 4G + for second kernel instead. + 0: to disable low allocation. + cs89x0_dma= [HW,NET] Format: diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 90d8cc930f5e..12349202cae7 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -521,19 +521,34 @@ static void __init reserve_crashkernel_low(void) unsigned long long low_base = 0, low_size = 0; unsigned long total_low_mem; unsigned long long base; + bool auto_set = false; int ret; total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); - if (ret != 0 || low_size <= 0) - return; + if (ret != 0) { + /* + * two parts from lib/swiotlb.c: + * swiotlb size: user specified with swiotlb= or default. + * swiotlb overflow buffer: now is hardcoded to 32k. + * We round it to 8M for other buffers that + * may need to stay low too. + */ + low_size = swiotlb_size_or_default() + (8UL<<20); + auto_set = true; + } else { + /* passed with crashkernel_low=0 ? */ + if (!low_size) + return; + } low_base = memblock_find_in_range(low_size, (1ULL<<32), low_size, alignment); if (!low_base) { - pr_info("crashkernel low reservation failed - No suitable area found.\n"); + if (!auto_set) + pr_info("crashkernel low reservation failed - No suitable area found.\n"); return; } diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 2de42f9401d2..a5ffd32642fd 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -25,6 +25,7 @@ extern int swiotlb_force; extern void swiotlb_init(int verbose); int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); extern unsigned long swiotlb_nr_tbl(void); +unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); /* diff --git a/lib/swiotlb.c b/lib/swiotlb.c index bfe02b8fc55b..d23762e6652c 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str) if (!strcmp(str, "force")) swiotlb_force = 1; - return 1; + return 0; } -__setup("swiotlb=", setup_io_tlb_npages); +early_param("swiotlb", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ unsigned long swiotlb_nr_tbl(void) @@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void) return io_tlb_nslabs; } EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); + +/* default to 64MB */ +#define IO_TLB_DEFAULT_SIZE (64UL<<20) +unsigned long swiotlb_size_or_default(void) +{ + unsigned long size; + + size = io_tlb_nslabs << IO_TLB_SHIFT; + + return size ? 
size : (IO_TLB_DEFAULT_SIZE); +} + /* Note that this doesn't work with highmem page */ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, volatile void *address) @@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) void __init swiotlb_init(int verbose) { - /* default to 64MB */ - size_t default_size = 64UL<<20; + size_t default_size = IO_TLB_DEFAULT_SIZE; unsigned char *vstart; unsigned long bytes; -- cgit v1.2.3 From 55a20ee7804ab64ac90bcdd4e2868a42829e2784 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 15 Apr 2013 22:23:47 -0700 Subject: x86, kdump: Retore crashkernel= to allocate under 896M Vivek found old kexec-tools does not work new kernel anymore. So change back crashkernel= back to old behavoir, and add crashkernel_high= to let user decide if buffer could be above 4G, and also new kexec-tools will be needed. -v2: let crashkernel=X override crashkernel_high= update description about _high will be ignored by crashkernel=X -v3: update description about kernel-parameters.txt according to Vivek. Signed-off-by: Yinghai Lu Link: http://lkml.kernel.org/r/1366089828-19692-4-git-send-email-yinghai@kernel.org Acked-by: Vivek Goyal Signed-off-by: H. Peter Anvin --- Documentation/kernel-parameters.txt | 13 +++++++++++-- arch/x86/kernel/setup.c | 24 +++++++++++++++++++----- include/linux/kexec.h | 2 ++ kernel/kexec.c | 9 +++++++++ 4 files changed, 41 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index cff672da2486..709eb3edc6b2 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -603,9 +603,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted. a memory unit (amount[KMG]). See also Documentation/kdump/kdump.txt for an example. + crashkernel_high=size[KMG] + [KNL, x86_64] range could be above 4G. Allow kernel + to allocate physical memory region from top, so could + be above 4G if system have more than 4G ram installed. + Otherwise memory region will be allocated below 4G, if + available. + It will be ignored if crashkernel=X is specified. crashkernel_low=size[KMG] - [KNL, x86_64] range under 4G. When crashkernel= is - passed, kernel allocate physical memory region + [KNL, x86_64] range under 4G. When crashkernel_high= is + passed, kernel could allocate physical memory region above 4G, that cause second kernel crash on system that require some amount of low memory, e.g. swiotlb requires at least 64M+32K low memory. Kernel would @@ -613,6 +620,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. This one let user to specify own low range under 4G for second kernel instead. 0: to disable low allocation. + It will be ignored when crashkernel_high=X is not used + or memory reserved is below 4G. cs89x0_dma= [HW,NET] Format: diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 12349202cae7..a85a144f2052 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void) /* * Keep the crash kernel below this limit. On 32 bits earlier kernels * would limit the kernel to the low 512 MiB due to mapping restrictions. + * On 64bit, old kexec-tools need to under 896MiB. 
*/ #ifdef CONFIG_X86_32 -# define CRASH_KERNEL_ADDR_MAX (512 << 20) +# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20) +# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20) #else -# define CRASH_KERNEL_ADDR_MAX MAXMEM +# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20) +# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM #endif static void __init reserve_crashkernel_low(void) @@ -525,6 +528,7 @@ static void __init reserve_crashkernel_low(void) int ret; total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); + /* crashkernel_low=YM */ ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); if (ret != 0) { @@ -569,14 +573,22 @@ static void __init reserve_crashkernel(void) const unsigned long long alignment = 16<<20; /* 16M */ unsigned long long total_mem; unsigned long long crash_size, crash_base; + bool high = false; int ret; total_mem = memblock_phys_mem_size(); + /* crashkernel=XM */ ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); - if (ret != 0 || crash_size <= 0) - return; + if (ret != 0 || crash_size <= 0) { + /* crashkernel_high=XM */ + ret = parse_crashkernel_high(boot_command_line, total_mem, + &crash_size, &crash_base); + if (ret != 0 || crash_size <= 0) + return; + high = true; + } /* 0 means: find the address automatically */ if (crash_base <= 0) { @@ -584,7 +596,9 @@ static void __init reserve_crashkernel(void) * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX */ crash_base = memblock_find_in_range(alignment, - CRASH_KERNEL_ADDR_MAX, crash_size, alignment); + high ? CRASH_KERNEL_ADDR_HIGH_MAX : + CRASH_KERNEL_ADDR_LOW_MAX, + crash_size, alignment); if (!crash_base) { pr_info("crashkernel reservation failed - No suitable area found.\n"); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d2e6927bbaae..d78d28a733b1 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -200,6 +200,8 @@ extern size_t vmcoreinfo_max_size; int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base); +int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, + unsigned long long *crash_size, unsigned long long *crash_base); int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base); int crash_shrink_memory(unsigned long new_size); diff --git a/kernel/kexec.c b/kernel/kexec.c index bddd3d7a74b6..1b2f73f5f9b9 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1422,6 +1422,15 @@ int __init parse_crashkernel(char *cmdline, "crashkernel="); } +int __init parse_crashkernel_high(char *cmdline, + unsigned long long system_ram, + unsigned long long *crash_size, + unsigned long long *crash_base) +{ + return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, + "crashkernel_high="); +} + int __init parse_crashkernel_low(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, -- cgit v1.2.3 From adbc742bf78695bb98c79d18c558b61571748b99 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 15 Apr 2013 22:23:48 -0700 Subject: x86, kdump: Change crashkernel_high/low= to crashkernel=,high/low Per hpa, use crashkernel=X,high crashkernel=Y,low instead of crashkernel_hign=X crashkernel_low=Y. As that could be extensible. 
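For illustration, a command line using the new form (sizes made up) would look like:

        crashkernel=256M,high crashkernel=72M,low
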
-v2: according to Vivek, change delimiter to ; -v3: let hign and low only handle simple form and it conforms to description in kernel-parameters.txt still keep crashkernel=X override any crashkernel=X,high crashkernel=Y,low -v4: update get_last_crashkernel returning and add more strict checking in parse_crashkernel_simple() found by HATAYAMA. -v5: Change delimiter back to , according to HPA. also separate parse_suffix from parse_simper according to vivek. so we can avoid @pos in that path. -v6: Tight the checking about crashkernel=X,highblahblah,high found by HTYAYAMA. Cc: HATAYAMA Daisuke Signed-off-by: Yinghai Lu Link: http://lkml.kernel.org/r/1366089828-19692-5-git-send-email-yinghai@kernel.org Acked-by: Vivek Goyal Signed-off-by: H. Peter Anvin --- Documentation/kernel-parameters.txt | 10 ++-- arch/x86/kernel/setup.c | 6 +- kernel/kexec.c | 109 +++++++++++++++++++++++++++++++----- 3 files changed, 104 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 709eb3edc6b2..a1ac1f1d6d34 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -603,16 +603,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted. a memory unit (amount[KMG]). See also Documentation/kdump/kdump.txt for an example. - crashkernel_high=size[KMG] + crashkernel=size[KMG],high [KNL, x86_64] range could be above 4G. Allow kernel to allocate physical memory region from top, so could be above 4G if system have more than 4G ram installed. Otherwise memory region will be allocated below 4G, if available. It will be ignored if crashkernel=X is specified. - crashkernel_low=size[KMG] - [KNL, x86_64] range under 4G. When crashkernel_high= is - passed, kernel could allocate physical memory region + crashkernel=size[KMG],low + [KNL, x86_64] range under 4G. When crashkernel=X,high + is passed, kernel could allocate physical memory region above 4G, that cause second kernel crash on system that require some amount of low memory, e.g. swiotlb requires at least 64M+32K low memory. Kernel would @@ -620,7 +620,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. This one let user to specify own low range under 4G for second kernel instead. 0: to disable low allocation. - It will be ignored when crashkernel_high=X is not used + It will be ignored when crashkernel=X,high is not used or memory reserved is below 4G. cs89x0_dma= [HW,NET] diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index a85a144f2052..fae9134a2de9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -528,7 +528,7 @@ static void __init reserve_crashkernel_low(void) int ret; total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); - /* crashkernel_low=YM */ + /* crashkernel=Y,low */ ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); if (ret != 0) { @@ -542,7 +542,7 @@ static void __init reserve_crashkernel_low(void) low_size = swiotlb_size_or_default() + (8UL<<20); auto_set = true; } else { - /* passed with crashkernel_low=0 ? */ + /* passed with crashkernel=0,low ? 
*/ if (!low_size) return; } @@ -582,7 +582,7 @@ static void __init reserve_crashkernel(void) ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); if (ret != 0 || crash_size <= 0) { - /* crashkernel_high=XM */ + /* crashkernel=X,high */ ret = parse_crashkernel_high(boot_command_line, total_mem, &crash_size, &crash_base); if (ret != 0 || crash_size <= 0) diff --git a/kernel/kexec.c b/kernel/kexec.c index 1b2f73f5f9b9..401fdb041f35 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1368,35 +1368,114 @@ static int __init parse_crashkernel_simple(char *cmdline, return 0; } +#define SUFFIX_HIGH 0 +#define SUFFIX_LOW 1 +#define SUFFIX_NULL 2 +static __initdata char *suffix_tbl[] = { + [SUFFIX_HIGH] = ",high", + [SUFFIX_LOW] = ",low", + [SUFFIX_NULL] = NULL, +}; + /* - * That function is the entry point for command line parsing and should be - * called from the arch-specific code. + * That function parses "suffix" crashkernel command lines like + * + * crashkernel=size,[high|low] + * + * It returns 0 on success and -EINVAL on failure. */ +static int __init parse_crashkernel_suffix(char *cmdline, + unsigned long long *crash_size, + unsigned long long *crash_base, + const char *suffix) +{ + char *cur = cmdline; + + *crash_size = memparse(cmdline, &cur); + if (cmdline == cur) { + pr_warn("crashkernel: memory value expected\n"); + return -EINVAL; + } + + /* check with suffix */ + if (strncmp(cur, suffix, strlen(suffix))) { + pr_warn("crashkernel: unrecognized char\n"); + return -EINVAL; + } + cur += strlen(suffix); + if (*cur != ' ' && *cur != '\0') { + pr_warn("crashkernel: unrecognized char\n"); + return -EINVAL; + } + + return 0; +} + +static __init char *get_last_crashkernel(char *cmdline, + const char *name, + const char *suffix) +{ + char *p = cmdline, *ck_cmdline = NULL; + + /* find crashkernel and use the last one if there are more */ + p = strstr(p, name); + while (p) { + char *end_p = strchr(p, ' '); + char *q; + + if (!end_p) + end_p = p + strlen(p); + + if (!suffix) { + int i; + + /* skip the one with any known suffix */ + for (i = 0; suffix_tbl[i]; i++) { + q = end_p - strlen(suffix_tbl[i]); + if (!strncmp(q, suffix_tbl[i], + strlen(suffix_tbl[i]))) + goto next; + } + ck_cmdline = p; + } else { + q = end_p - strlen(suffix); + if (!strncmp(q, suffix, strlen(suffix))) + ck_cmdline = p; + } +next: + p = strstr(p+1, name); + } + + if (!ck_cmdline) + return NULL; + + return ck_cmdline; +} + static int __init __parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base, - const char *name) + const char *name, + const char *suffix) { - char *p = cmdline, *ck_cmdline = NULL; char *first_colon, *first_space; + char *ck_cmdline; BUG_ON(!crash_size || !crash_base); *crash_size = 0; *crash_base = 0; - /* find crashkernel and use the last one if there are more */ - p = strstr(p, name); - while (p) { - ck_cmdline = p; - p = strstr(p+1, name); - } + ck_cmdline = get_last_crashkernel(cmdline, name, suffix); if (!ck_cmdline) return -EINVAL; ck_cmdline += strlen(name); + if (suffix) + return parse_crashkernel_suffix(ck_cmdline, crash_size, + crash_base, suffix); /* * if the commandline contains a ':', then that's the extended * syntax -- if not, it must be the classic syntax @@ -1413,13 +1492,17 @@ static int __init __parse_crashkernel(char *cmdline, return 0; } +/* + * That function is the entry point for command line parsing and should be + * called from the arch-specific code. 
+ */ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base) { return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel="); + "crashkernel=", NULL); } int __init parse_crashkernel_high(char *cmdline, @@ -1428,7 +1511,7 @@ int __init parse_crashkernel_high(char *cmdline, unsigned long long *crash_base) { return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel_high="); + "crashkernel=", suffix_tbl[SUFFIX_HIGH]); } int __init parse_crashkernel_low(char *cmdline, @@ -1437,7 +1520,7 @@ int __init parse_crashkernel_low(char *cmdline, unsigned long long *crash_base) { return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel_low="); + "crashkernel=", suffix_tbl[SUFFIX_LOW]); } static void update_vmcoreinfo_note(void) -- cgit v1.2.3 From 15bbc1b28ff65767922f78c266821cc138b90a47 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 17 Apr 2013 12:09:09 -0700 Subject: ARM: KVM: fix unbalanced get_cpu() in access_dcsw In the very unlikely event where a guest would be foolish enough to *read* from a write-only cache maintainance register, we end up with preemption disabled, due to a misplaced get_cpu(). Just move the "is_write" test outside of the critical section. Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Linus Torvalds --- arch/arm/kvm/coproc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4ea9a982269c..7bed7556077a 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, u32 val; int cpu; - cpu = get_cpu(); - if (!p->is_write) return read_from_write_only(vcpu, p); + cpu = get_cpu(); + cpumask_setall(&vcpu->arch.require_dcache_flush); cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); -- cgit v1.2.3 From 472d326677db37625903265428582694394d2df7 Mon Sep 17 00:00:00 2001 From: Josh Wu Date: Wed, 17 Apr 2013 15:58:31 -0700 Subject: avr32: fix build error in atstk1006_defconfig fixed the following compile error when use avr32 atstk1006_defconfig: drivers/mtd/nand/atmel_nand.c: In function 'pmecc_err_location': drivers/mtd/nand/atmel_nand.c:639: error: implicit declaration of function 'writel_relaxed' which was introduced by commit 1c7b874d33b4 ("mtd: at91: atmel_nand: add Programmable Multibit ECC controller support"). The PMECC for nand flash code uses writel_relaxed(). But in avr32, there is no macro "writel_relaxed" defined. This patch add writex_relaxed macro definitions. Signed-off-by: Josh Wu Acked-by: Havard Skinnemoen Acked-by: Hans-Christian Egtvedt Cc: David Woodhouse Cc: Artem Bityutskiy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/avr32/include/asm/io.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index cf60d0a9f176..fc6483f83ccc 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h @@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32) #define readw_be __raw_readw #define readl_be __raw_readl +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel + #define writeb_be __raw_writeb #define writew_be __raw_writew #define writel_be __raw_writel -- cgit v1.2.3 From 7eff7ded02d1b15ba8321664839b353fa6c0c1e4 Mon Sep 17 00:00:00 2001 From: "K. Y. 
Srinivasan" Date: Thu, 18 Apr 2013 08:44:46 -0700 Subject: x86, hyperv: Handle Xen emulation of Hyper-V more gracefully Install the Hyper-V specific interrupt handler only when needed. This would permit us to get rid of the Xen check. Note that when the vmbus drivers invokes the call to register its handler, we are sure to be running on Hyper-V. Signed-off-by: K. Y. Srinivasan Link: http://lkml.kernel.org/r/1366299886-6399-1-git-send-email-kys@microsoft.com Acked-by: Michael S. Tsirkin Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/mshyperv.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index a7d26d83fb70..8f4be53ea04b 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void) if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) return false; - /* - * Xen emulates Hyper-V to support enlightened Windows. - * Check to see first if we are on a Xen Hypervisor. - */ - if (xen_cpuid_base()) - return false; - cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); @@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void) if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); -#if IS_ENABLED(CONFIG_HYPERV) - /* - * Setup the IDT for hypervisor callback. - */ - alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); -#endif } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { @@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr; void hv_register_vmbus_handler(int irq, irq_handler_t handler) { + /* + * Setup the IDT for hypervisor callback. + */ + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); + vmbus_irq = irq; vmbus_isr = handler; } -- cgit v1.2.3 From 73053d973dd6f56472309cffa5a5d15a62dd6f96 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 17 Apr 2013 10:46:52 -0500 Subject: ARM: highbank: fix cache flush ordering for cpu hotplug The L1 data cache flush needs to be after highbank_set_cpu_jump call which pollutes the cache with the l2x0_lock. This causes other cores to deadlock waiting for the l2x0_lock. Moving the flush of the entire data cache after highbank_set_cpu_jump fixes the problem. Use flush_cache_louis instead of flush_cache_all are that is sufficient to flush only the L1 data cache. flush_cache_louis did not exist when highbank_cpu_die was originally written. With PL310 errata 769419 enabled, a wmb is inserted into idle which takes the l2x0_lock. This makes the problem much more easily hit and causes reset to hang. 
Reported-by: Paolo Pisati Signed-off-by: Rob Herring Signed-off-by: Olof Johansson --- arch/arm/mach-highbank/hotplug.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c index f30c52843396..890cae23c12a 100644 --- a/arch/arm/mach-highbank/hotplug.c +++ b/arch/arm/mach-highbank/hotplug.c @@ -28,13 +28,11 @@ extern void secondary_startup(void); */ void __ref highbank_cpu_die(unsigned int cpu) { - flush_cache_all(); - highbank_set_cpu_jump(cpu, phys_to_virt(0)); - highbank_set_core_pwr(); - cpu_do_idle(); + flush_cache_louis(); + highbank_set_core_pwr(); - /* We should never return from idle */ - panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu); + while (1) + cpu_do_idle(); } -- cgit v1.2.3 From cea15092f098b7018e89f64a5a14bb71955965d5 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 18 Apr 2013 17:33:40 +0100 Subject: ARM: 7699/1: sched_clock: Add more notrace to prevent recursion cyc_to_sched_clock() is called by sched_clock() and cyc_to_ns() is called by cyc_to_sched_clock(). I suspect that some compilers inline both of these functions into sched_clock() and so we've been getting away without having a notrace marking. It seems that my compiler isn't inlining cyc_to_sched_clock() though, so I'm hitting a recursion bug when I enable the function graph tracer, causing my system to crash. Marking these functions notrace fixes it. Technically cyc_to_ns() doesn't need the notrace because it's already marked inline, but let's just add it so that if we ever remove inline from that function it doesn't blow up. Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/sched_clock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index bd6f56b9ec21..59d2adb764a9 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void) static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; -static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) +static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) { return (cyc * mult) >> shift; } -static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) +static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) { u64 epoch_ns; u32 epoch_cyc; -- cgit v1.2.3 From f36391d2790d04993f48da6a45810033a2cdf847 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 19 Apr 2013 17:26:26 -0400 Subject: sparc64: Fix race in TLB batch processing. As reported by Dave Kleikamp, when we emit cross calls to do batched TLB flush processing we have a race because we do not synchronize on the sibling cpus completing the cross call. So meanwhile the TLB batch can be reset (tb->tlb_nr set to zero, etc.) and either flushes are missed or flushes will flush the wrong addresses. Fix this by using generic infrastructure to synchronize on the completion of the cross call. This first required getting the flush_tlb_pending() call out from switch_to() which operates with locks held and interrupts disabled. The problem is that smp_call_function_many() cannot be invoked with IRQs disabled and this is explicitly checked for with WARN_ON_ONCE(). We get the batch processing outside of locked IRQ disabled sections by using some ideas from the powerpc port. 
Namely, we only batch inside of arch_{enter,leave}_lazy_mmu_mode() calls. If we're not in such a region, we flush TLBs synchronously. 1) Get rid of xcall_flush_tlb_pending and per-cpu type implementations. 2) Do TLB batch cross calls instead via: smp_call_function_many() tlb_pending_func() __flush_tlb_pending() 3) Batch only in lazy mmu sequences: a) Add 'active' member to struct tlb_batch b) Define __HAVE_ARCH_ENTER_LAZY_MMU_MODE c) Set 'active' in arch_enter_lazy_mmu_mode() d) Run batch and clear 'active' in arch_leave_lazy_mmu_mode() e) Check 'active' in tlb_batch_add_one() and do a synchronous flush if it's clear. 4) Add infrastructure for synchronous TLB page flushes. a) Implement __flush_tlb_page and per-cpu variants, patch as needed. b) Likewise for xcall_flush_tlb_page. c) Implement smp_flush_tlb_page() to invoke the cross-call. d) Wire up global_flush_tlb_page() to the right routine based upon CONFIG_SMP 5) It turns out that singleton batches are very common, 2 out of every 3 batch flushes have only a single entry in them. The batch flush waiting is very expensive, both because of the poll on sibling cpu completeion, as well as because passing the tlb batch pointer to the sibling cpus invokes a shared memory dereference. Therefore, in flush_tlb_pending(), if there is only one entry in the batch perform a completely asynchronous global_flush_tlb_page() instead. Reported-by: Dave Kleikamp Signed-off-by: David S. Miller Acked-by: Dave Kleikamp --- arch/sparc/include/asm/pgtable_64.h | 1 + arch/sparc/include/asm/switch_to_64.h | 3 +- arch/sparc/include/asm/tlbflush_64.h | 37 +++++++++-- arch/sparc/kernel/smp_64.c | 41 ++++++++++-- arch/sparc/mm/tlb.c | 38 +++++++++-- arch/sparc/mm/tsb.c | 57 +++++++++++----- arch/sparc/mm/ultra.S | 119 +++++++++++++++++++++++++++------- 7 files changed, 241 insertions(+), 55 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 08fcce90316b..7619f2f792af 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); } +#include #include /* We provide our own get_unmapped_area to cope with VA holes and diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index cad36f56fa03..c7de3323819c 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -18,8 +18,7 @@ do { \ * and 2 stores in this critical code path. -DaveM */ #define switch_to(prev, next, last) \ -do { flush_tlb_pending(); \ - save_and_clear_fpu(); \ +do { save_and_clear_fpu(); \ /* If you are tempted to conditionalize the following */ \ /* so that ASI is only written if it changes, think again. */ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index 2ef463494153..f0d6a9700f4c 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -11,24 +11,40 @@ struct tlb_batch { struct mm_struct *mm; unsigned long tlb_nr; + unsigned long active; unsigned long vaddrs[TLB_BATCH_NR]; }; extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); extern void flush_tsb_user(struct tlb_batch *tb); +extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); /* TLB flush operations. 
*/ -extern void flush_tlb_pending(void); +static inline void flush_tlb_mm(struct mm_struct *mm) +{ +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE -#define flush_tlb_range(vma,start,end) \ - do { (void)(start); flush_tlb_pending(); } while (0) -#define flush_tlb_page(vma,addr) flush_tlb_pending() -#define flush_tlb_mm(mm) flush_tlb_pending() +extern void flush_tlb_pending(void); +extern void arch_enter_lazy_mmu_mode(void); +extern void arch_leave_lazy_mmu_mode(void); +#define arch_flush_lazy_mmu_mode() do {} while (0) /* Local cpu only. */ extern void __flush_tlb_all(void); - +extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifndef CONFIG_SMP @@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \ __flush_tlb_kernel_range(start,end); \ } while (0) +static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); +} + #else /* CONFIG_SMP */ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); #define flush_tlb_kernel_range(start, end) \ do { flush_tsb_kernel_range(start,end); \ smp_flush_tlb_kernel_range(start, end); \ } while (0) +#define global_flush_tlb_page(mm, vaddr) \ + smp_flush_tlb_page(mm, vaddr) + #endif /* ! CONFIG_SMP */ #endif /* _SPARC64_TLBFLUSH_H */ diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 537eb66abd06..ca64d2a86ec0 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm) } extern unsigned long xcall_flush_tlb_mm; -extern unsigned long xcall_flush_tlb_pending; +extern unsigned long xcall_flush_tlb_page; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_fetch_glob_regs; extern unsigned long xcall_fetch_glob_pmu; @@ -1074,23 +1074,56 @@ local_flush_and_out: put_cpu(); } +struct tlb_pending_info { + unsigned long ctx; + unsigned long nr; + unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ + struct tlb_pending_info *t = info; + + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} + void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) { u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; int cpu = get_cpu(); + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); else - smp_cross_call_masked(&xcall_flush_tlb_pending, - ctx, nr, (unsigned long) vaddrs, - mm_cpumask(mm)); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); put_cpu(); } +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long context = CTX_HWBITS(mm->context); + int cpu = get_cpu(); + + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); + else + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); + + put_cpu(); +} + void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= 
PAGE_MASK; diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index ba6ae7ffdc2c..272aa4f7657e 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); + struct mm_struct *mm = tb->mm; - if (tb->tlb_nr) { - flush_tsb_user(tb); + if (!tb->tlb_nr) + goto out; - if (CTX_VALID(tb->mm->context)) { + flush_tsb_user(tb); + + if (CTX_VALID(mm->context)) { + if (tb->tlb_nr == 1) { + global_flush_tlb_page(mm, tb->vaddrs[0]); + } else { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); @@ -37,12 +43,30 @@ void flush_tlb_pending(void) tb->tlb_nr, &tb->vaddrs[0]); #endif } - tb->tlb_nr = 0; } + tb->tlb_nr = 0; + +out: put_cpu_var(tlb_batch); } +void arch_enter_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + tb->active = 1; +} + +void arch_leave_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + if (tb->tlb_nr) + flush_tlb_pending(); + tb->active = 0; +} + static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, bool exec) { @@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, nr = 0; } + if (!tb->active) { + global_flush_tlb_page(mm, vaddr); + flush_tsb_user_page(mm, vaddr); + return; + } + if (nr == 0) tb->mm = mm; diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 428982b9becf..2cc3bce5ee91 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -7,11 +7,10 @@ #include #include #include -#include -#include -#include #include +#include #include +#include #include extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) } } -static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, - unsigned long tsb, unsigned long nentries) +static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, + unsigned long hash_shift, + unsigned long nentries) { - unsigned long i; + unsigned long tag, ent, hash; - for (i = 0; i < tb->tlb_nr; i++) { - unsigned long v = tb->vaddrs[i]; - unsigned long tag, ent, hash; + v &= ~0x1UL; + hash = tsb_hash(v, hash_shift, nentries); + ent = tsb + (hash * sizeof(struct tsb)); + tag = (v >> 22UL); - v &= ~0x1UL; + tsb_flush(ent, tag); +} - hash = tsb_hash(v, hash_shift, nentries); - ent = tsb + (hash * sizeof(struct tsb)); - tag = (v >> 22UL); +static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, + unsigned long tsb, unsigned long nentries) +{ + unsigned long i; - tsb_flush(ent, tag); - } + for (i = 0; i < tb->tlb_nr; i++) + __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); } void flush_tsb_user(struct tlb_batch *tb) @@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb) spin_unlock_irqrestore(&mm->context.lock, flags); } +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long nentries, base, flags; + + spin_lock_irqsave(&mm->context.lock, flags); + + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; 
+ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); + } +#endif + spin_unlock_irqrestore(&mm->context.lock, flags); +} + #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index f8e13d421fcb..432aa0cb1b38 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -52,6 +52,33 @@ __flush_tlb_mm: /* 18 insns */ nop nop + .align 32 + .globl __flush_tlb_page +__flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, %pstate + mov SECONDARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + andn %o1, 1, %o3 + be,pn %icc, 1f + or %o3, 0x10, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + retl + wrpr %g7, 0x0, %pstate + nop + nop + nop + nop + .align 32 .globl __flush_tlb_pending __flush_tlb_pending: /* 26 insns */ @@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */ retl wrpr %g7, 0x0, %pstate +__cheetah_flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, 0x0, %pstate + wrpr %g0, 1, %tl + mov PRIMARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 + sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 + or %o0, %o3, %o0 /* Preserve nucleus page size fields */ + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + be,pn %icc, 1f + andn %o1, 1, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + wrpr %g0, 0, %tl + retl + wrpr %g7, 0x0, %pstate + __cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 @@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */ retl nop +__hypervisor_flush_tlb_page: /* 11 insns */ + /* %o0 = context, %o1 = vaddr */ + mov %o0, %g2 + mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ + mov %g2, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 + retl + nop + __hypervisor_flush_tlb_pending: /* 16 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 @@ -339,6 +405,13 @@ cheetah_patch_cachetlbops: call tlb_patch_one mov 19, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__cheetah_flush_tlb_page), %o1 + or %o1, %lo(__cheetah_flush_tlb_page), %o1 + call tlb_patch_one + mov 22, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 @@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */ nop nop - .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=context, %g1=nr, %g7=vaddrs[] */ - sllx %g1, 3, %g1 + .globl xcall_flush_tlb_page +xcall_flush_tlb_page: /* 17 insns */ + /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */ or %g5, %g4, %g5 mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %g5 - andcc 
%g5, 0x1, %g0 + andcc %g1, 0x1, %g0 be,pn %icc, 2f - - andn %g5, 0x1, %g5 + andn %g1, 0x1, %g5 stxa %g0, [%g5] ASI_IMMU_DEMAP 2: stxa %g0, [%g5] ASI_DMMU_DEMAP membar #Sync - brnz,pt %g1, 1b - nop stxa %g2, [%g4] ASI_DMMU retry nop + nop .globl xcall_flush_tlb_kernel_range xcall_flush_tlb_kernel_range: /* 25 insns */ @@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ membar #Sync retry - .globl __hypervisor_xcall_flush_tlb_pending -__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ - sllx %g1, 3, %g1 + .globl __hypervisor_xcall_flush_tlb_page +__hypervisor_xcall_flush_tlb_page: /* 17 insns */ + /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ + mov %g1, %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 @@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,a,pn %o0, __hypervisor_tlb_xcall_error mov %o0, %g5 - brnz,pt %g1, 1b - nop mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 @@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 10, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__hypervisor_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_flush_tlb_page), %o1 + call tlb_patch_one + mov 11, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 @@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 21, %o2 - sethi %hi(xcall_flush_tlb_pending), %o0 - or %o0, %lo(xcall_flush_tlb_pending), %o0 - sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 - or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 + sethi %hi(xcall_flush_tlb_page), %o0 + or %o0, %lo(xcall_flush_tlb_page), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one - mov 21, %o2 + mov 17, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 -- cgit v1.2.3 From 74c3e3fcf350b2e7e3eaf9550528ee3f74e44b37 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Fri, 19 Apr 2013 16:36:03 -0700 Subject: x86, microcode: Verify the family before dispatching microcode patching For each CPU vendor that implements CPU microcode patching, there will be a minimum family for which this is implemented. Verify this minimum level of support. This can be done in the dispatch function or early in the application functions. Doing the latter turned out to be somewhat awkward because of the ineviable split between the BSP and the AP paths, and rather than pushing deep into the application functions, do this in the dispatch function. 
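For reference, the family check relies on the standard CPUID leaf 1 encoding: the base family sits in bits 11:8 and, when it reads 0xf, the extended family in bits 27:20 is added on top. A minimal user-space sketch of the same computation (illustrative only, not part of the patch; it uses the compiler's __get_cpuid() helper rather than the kernel's native_cpuid()):

#include <cpuid.h>
#include <stdio.h>

/* Illustrative sketch of the family decoding done by the x86_family()
 * helper added below: base family in bits 11:8, plus the extended
 * family in bits 27:20 when the base family is 0xf. */
static int cpuid_family(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return -1;	/* CPUID leaf 1 not available */

	int family = (eax >> 8) & 0xf;
	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	return family;
}

int main(void)
{
	printf("x86 family: %d\n", cpuid_family());
	return 0;
}

A family below 6 therefore fails the new "x86 >= 6" test and early microcode loading is simply skipped on such parts.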
Reported-by: "Bryan O'Donoghue" Suggested-by: Borislav Petkov Cc: Fenghua Yu Link: http://lkml.kernel.org/r/1366392183-4149-1-git-send-email-bryan.odonoghue.lkml@nexus-software.ie --- arch/x86/kernel/microcode_core_early.c | 38 +++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c index 577db8417d15..833d51d6ee06 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/microcode_core_early.c @@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void) u32 eax = 0x00000000; u32 ebx, ecx = 0, edx; - if (!have_cpuid_p()) - return X86_VENDOR_UNKNOWN; - native_cpuid(&eax, &ebx, &ecx, &edx); if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) @@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void) return X86_VENDOR_UNKNOWN; } +static int __cpuinit x86_family(void) +{ + u32 eax = 0x00000001; + u32 ebx, ecx = 0, edx; + int x86; + + native_cpuid(&eax, &ebx, &ecx, &edx); + + x86 = (eax >> 8) & 0xf; + if (x86 == 15) + x86 += (eax >> 20) & 0xff; + + return x86; +} + void __init load_ucode_bsp(void) { - int vendor = x86_vendor(); + int vendor, x86; + + if (!have_cpuid_p()) + return; - if (vendor == X86_VENDOR_INTEL) + vendor = x86_vendor(); + x86 = x86_family(); + + if (vendor == X86_VENDOR_INTEL && x86 >= 6) load_ucode_intel_bsp(); } void __cpuinit load_ucode_ap(void) { - int vendor = x86_vendor(); + int vendor, x86; + + if (!have_cpuid_p()) + return; + + vendor = x86_vendor(); + x86 = x86_family(); - if (vendor == X86_VENDOR_INTEL) + if (vendor == X86_VENDOR_INTEL && x86 >= 6) load_ucode_intel_ap(); } -- cgit v1.2.3 From 3b5e50edaf500f392f4a372296afc0b99ffa7e70 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Mon, 22 Apr 2013 17:57:54 +0200 Subject: Revert "MIPS: page.h: Provide more readable definition for PAGE_MASK." This reverts commit c17a6554782ad531f4713b33fd6339ba67ef6391. Manuel Lauss writes: lmo commit c17a6554 (MIPS: page.h: Provide more readable definition for PAGE_MASK) apparently breaks ioremap of 36-bit addresses on my Alchemy systems (PCI and PCMCIA) The reason is that in arch/mips/mm/ioremap.c line 157 (phys_addr &= PAGE_MASK) bits 32-35 are cut off. Seems the new PAGE_MASK is explicitly 32bit, or one could make it signed instead of unsigned long. 
--- arch/mips/include/asm/page.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 99fc547af9d3..eab99e536b5c 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -31,7 +31,7 @@ #define PAGE_SHIFT 16 #endif #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) -- cgit v1.2.3 From 51f8fbba64e58ee611c58f15a00b7611e13a0460 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 24 Apr 2013 12:09:14 +0200 Subject: x86, efi: Fix a build warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix this: arch/x86/boot/compressed/eboot.c: In function ‘setup_efi_vars’: arch/x86/boot/compressed/eboot.c:269:2: warning: passing argument 1 of ‘efi_call_phys’ makes pointer from integer without a cast [enabled by default] In file included from arch/x86/boot/compressed/eboot.c:12:0: /w/kernel/linux/arch/x86/include/asm/efi.h:8:33: note: expected ‘void *’ but argument is of type ‘long unsigned int’ after cc5a080c5d40 ("efi: Pass boot services variable info to runtime code"). Reported-by: Paul Bolle Cc: Matthew Garrett Signed-off-by: Borislav Petkov Signed-off-by: Matt Fleming --- arch/x86/boot/compressed/eboot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 8615f7581820..41de115a55b7 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -266,7 +266,7 @@ static efi_status_t setup_efi_vars(struct boot_params *params) while (data && data->next) data = (struct setup_data *)(unsigned long)data->next; - status = efi_call_phys4(sys_table->runtime->query_variable_info, + status = efi_call_phys4((void *)sys_table->runtime->query_variable_info, EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS, &store_size, -- cgit v1.2.3 From f697036b93aa7345d4cbb3c854a76456c0ddac45 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Wed, 24 Apr 2013 11:16:52 -0400 Subject: efi: Check EFI revision in setup_efi_vars We need to check the runtime sys_table for the EFI version the firmware specifies instead of just checking for a NULL QueryVariableInfo. Older implementations of EFI don't have QueryVariableInfo but the runtime is a smaller structure, so the pointer to it may be pointing off into garbage. This is apparently the case with several Apple firmwares that support EFI 1.10, and the current check causes them to no longer boot. Fix based on a suggestion from Matthew Garrett. 
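For context, the system table revision packs the EFI version as (major << 16 | minor), so an EFI 1.10 firmware reports a value well below EFI_2_00_SYSTEM_TABLE_REVISION and is now rejected before the runtime table is dereferenced. A tiny stand-alone illustration of the comparison (the macros follow that encoding as used by include/linux/efi.h; the sample revision is hypothetical):

#include <stdio.h>

/* EFI system table header revision: major.minor packed into one word. */
#define EFI_SYSTEM_TABLE_REVISION(major, minor)	(((major) << 16) | (minor))
#define EFI_1_10_SYSTEM_TABLE_REVISION	EFI_SYSTEM_TABLE_REVISION(1, 10)
#define EFI_2_00_SYSTEM_TABLE_REVISION	EFI_SYSTEM_TABLE_REVISION(2, 0)

int main(void)
{
	unsigned int rev = EFI_1_10_SYSTEM_TABLE_REVISION;	/* e.g. an older Apple firmware */

	/* The patched check: QueryVariableInfo only exists from EFI 2.00
	 * onwards, so older tables are treated as unsupported instead of
	 * reading a pointer past the end of a shorter runtime table. */
	if (rev < EFI_2_00_SYSTEM_TABLE_REVISION)
		printf("EFI %u.%u: QueryVariableInfo unavailable\n",
		       rev >> 16, rev & 0xffff);
	return 0;
}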
Signed-off-by: Josh Boyer Signed-off-by: Matt Fleming --- arch/x86/boot/compressed/eboot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 41de115a55b7..35ee62fccf98 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -258,7 +258,7 @@ static efi_status_t setup_efi_vars(struct boot_params *params) u64 store_size, remaining_size, var_size; efi_status_t status; - if (!sys_table->runtime->query_variable_info) + if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION) return EFI_UNSUPPORTED; data = (struct setup_data *)(unsigned long)params->hdr.setup_data; -- cgit v1.2.3 From f0af97070acbad5d6a361f485828223a4faaa0ee Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 24 Apr 2013 16:52:18 -0700 Subject: sparc64: Fix missing put_cpu_var() in tlb_batch_add_one() when not batching. Reported-by: Meelis Roos Signed-off-by: David S. Miller --- arch/sparc/mm/tlb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 272aa4f7657e..83d89bcb44af 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -87,7 +87,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, if (!tb->active) { global_flush_tlb_page(mm, vaddr); flush_tsb_user_page(mm, vaddr); - return; + goto out; } if (nr == 0) @@ -98,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, if (nr >= TLB_BATCH_NR) flush_tlb_pending(); +out: put_cpu_var(tlb_batch); } -- cgit v1.2.3 From ca0ad83da17b6ba07f9eb5902e69daac90c4fa61 Mon Sep 17 00:00:00 2001 From: John David Anglin Date: Sat, 20 Apr 2013 19:41:06 +0000 Subject: parisc: Provide __ucmpdi2 to resolve undefined references in 32 bit builds. The Debian experimental linux source package (3.8.5-1) build fails with the following errors: ... MODPOST 2016 modules ERROR: "__ucmpdi2" [fs/btrfs/btrfs.ko] undefined! ERROR: "__ucmpdi2" [drivers/md/dm-verity.ko] undefined! The attached patch resolves this problem. It is based on the s390 implementation of ucmpdi2.c. Signed-off-by: John David Anglin Cc: "James E.J. 
Bottomley" Signed-off-by: Helge Deller --- arch/parisc/kernel/parisc_ksyms.c | 2 ++ arch/parisc/lib/Makefile | 3 ++- arch/parisc/lib/ucmpdi2.c | 25 +++++++++++++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 arch/parisc/lib/ucmpdi2.c (limited to 'arch') diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 6795dc6c995f..568b2c61ea02 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -120,11 +120,13 @@ extern void __ashrdi3(void); extern void __ashldi3(void); extern void __lshrdi3(void); extern void __muldi3(void); +extern void __ucmpdi2(void); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__muldi3); +EXPORT_SYMBOL(__ucmpdi2); asmlinkage void * __canonicalize_funcptr_for_compare(void *); EXPORT_SYMBOL(__canonicalize_funcptr_for_compare); diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 5f2e6904d14a..5651536ac733 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile @@ -2,6 +2,7 @@ # Makefile for parisc-specific library files # -lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o +lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ + ucmpdi2.o obj-y := iomap.o diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c new file mode 100644 index 000000000000..149c016f32c5 --- /dev/null +++ b/arch/parisc/lib/ucmpdi2.c @@ -0,0 +1,25 @@ +#include + +union ull_union { + unsigned long long ull; + struct { + unsigned int high; + unsigned int low; + } ui; +}; + +int __ucmpdi2(unsigned long long a, unsigned long long b) +{ + union ull_union au = {.ull = a}; + union ull_union bu = {.ull = b}; + + if (au.ui.high < bu.ui.high) + return 0; + else if (au.ui.high > bu.ui.high) + return 2; + if (au.ui.low < bu.ui.low) + return 0; + else if (au.ui.low > bu.ui.low) + return 2; + return 1; +} -- cgit v1.2.3 From 87be2f88b24ad1f206a5c87529ff406e96c95a12 Mon Sep 17 00:00:00 2001 From: John David Anglin Date: Tue, 23 Apr 2013 00:23:50 +0000 Subject: parisc: Change kunmap macro to static inline function Change kunmap macro to static inline function to fix build error compiling drivers/base/dma-buf.c. Without the change, the following error can occur: CC drivers/base/dma-buf.o drivers/base/dma-buf.c: In function 'dma_buf_kunmap': drivers/base/dma-buf.c:427:46: error: macro "kunmap" passed 3 arguments, but takes just 1 I believe parisc is the only arch to implement kunmap using a macro. Signed-off-by: John David Anglin Cc: "James E.J. 
Bottomley" Cc: Helge Deller Signed-off-by: Helge Deller --- arch/parisc/include/asm/cacheflush.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 79f694f3ad9b..f0e2784e7cca 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -140,7 +140,10 @@ static inline void *kmap(struct page *page) return page_address(page); } -#define kunmap(page) kunmap_parisc(page_address(page)) +static inline void kunmap(struct page *page) +{ + kunmap_parisc(page_address(page)); +} static inline void *kmap_atomic(struct page *page) { -- cgit v1.2.3 From 0f28b62890097cb8732581a80f0a47fd7649df7d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 22 Apr 2013 12:53:43 +0000 Subject: parisc: uaccess: fix compiler warnings caused by __put_user casting When targetting 32-bit processors, __put_user emits a pair of stw instructions for the 8-byte case. If the type of __val is a pointer, the marshalling code casts it to the wider integer type of u64, resulting in the following compiler warnings: kernel/signal.c: In function 'copy_siginfo_to_user': kernel/signal.c:2752:11: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] kernel/signal.c:2752:11: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] [...] This patch fixes the warnings by removing the marshalling code and using the correct output modifiers in the __put_{user,kernel}_asm64 macros so that GCC will allocate the right registers without the need to extract the two words explicitly. Cc: Helge Deller Signed-off-by: Will Deacon Signed-off-by: Helge Deller --- arch/parisc/include/asm/uaccess.h | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 4ba2c93770f1..e0a82358517e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -181,30 +181,24 @@ struct exception_data { #if !defined(CONFIG_64BIT) #define __put_kernel_asm64(__val,ptr) do { \ - u64 __val64 = (u64)(__val); \ - u32 hi = (__val64) >> 32; \ - u32 lo = (__val64) & 0xffffffff; \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%1)" \ - "\n2:\tstw %3,4(%1)\n\t" \ + "\n2:\tstw %R2,4(%1)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ : "=r"(__pu_err) \ - : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ + : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ } while (0) #define __put_user_asm64(__val,ptr) do { \ - u64 __val64 = (u64)(__val); \ - u32 hi = (__val64) >> 32; \ - u32 lo = (__val64) & 0xffffffff; \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%%sr3,%1)" \ - "\n2:\tstw %3,4(%%sr3,%1)\n\t" \ + "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ : "=r"(__pu_err) \ - : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ + : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ } while (0) -- cgit v1.2.3 From cf71130d630d773ef1861adbd8a034d3ac806f3e Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 23 Apr 2013 21:29:03 +0200 Subject: parisc: disable -mlong-calls compiler option for kernel modules CONFIG_MLONGCALLS was introduced in commit ec758f98328da3eb933a25dc7a2eed01ef44d849 to overcome linker issues when linking huge linux kernels, e.g. with many modules linked in. 
But in the kernel module loader there is no support yet for the new relocation types, which is why modules built with -mlong-calls can't be loaded. Furthermore, for modules long calls are not really necessary, since we already use stub sections which resolve long distance calls. So, let's just disable this compiler option when compiling kernel modules. Signed-off-by: Helge Deller --- arch/parisc/Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 01d95e2f0581..113e28206503 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER endif # Use long jumps instead of long branches (needed if your linker fails to -# link a too big vmlinux executable) -cflags-$(CONFIG_MLONGCALLS) += -mlong-calls +# link a too big vmlinux executable). Not enabled for building modules. +ifdef CONFIG_MLONGCALLS +KBUILD_CFLAGS_KERNEL += -mlong-calls +endif # select which processor to optimise for cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 -- cgit v1.2.3 From bda079d336cd8183e1d844a265ea87ae3e1bbe78 Mon Sep 17 00:00:00 2001 From: John David Anglin Date: Tue, 23 Apr 2013 22:42:07 +0200 Subject: parisc: use spin_lock_irqsave/spin_unlock_irqrestore for PTE updates User applications running on SMP kernels have long suffered from instability and random segmentation faults. This patch improves the situation although there is more work to be done. One of the problems is the various routines in pgtable.h that update page table entries use different locking mechanisms, or no lock at all (set_pte_at). This change modifies the routines to all use the same lock pa_dbit_lock. This lock is used for dirty bit updates in the interruption code. The patch also purges the TLB entries associated with the PTE to ensure that inconsistent values are not used after the page table entry is updated. The UP and SMP code are now identical. The change also includes a minor update to the purge_tlb_entries function in cache.c to improve its efficiency. Signed-off-by: John David Anglin Cc: Helge Deller Signed-off-by: Helge Deller --- arch/parisc/include/asm/pgtable.h | 47 +++++++++++++++++++++------------------ arch/parisc/kernel/cache.c | 5 +---- 2 files changed, 26 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 7df49fad29f9..1e40d7f86be3 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -16,6 +16,8 @@ #include #include +extern spinlock_t pa_dbit_lock; + /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. 
For the return value to be meaningful, ADDR must be >= @@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); #define set_pte_at(mm, addr, ptep, pteval) \ do { \ + unsigned long flags; \ + spin_lock_irqsave(&pa_dbit_lock, flags); \ set_pte(ptep, pteval); \ purge_tlb_entries(mm, addr); \ + spin_unlock_irqrestore(&pa_dbit_lock, flags); \ } while (0) #endif /* !__ASSEMBLY__ */ @@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_SMP + pte_t pte; + unsigned long flags; + if (!pte_young(*ptep)) return 0; - return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep)); -#else - pte_t pte = *ptep; - if (!pte_young(pte)) + + spin_lock_irqsave(&pa_dbit_lock, flags); + pte = *ptep; + if (!pte_young(pte)) { + spin_unlock_irqrestore(&pa_dbit_lock, flags); return 0; - set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); + } + set_pte(ptep, pte_mkold(pte)); + purge_tlb_entries(vma->vm_mm, addr); + spin_unlock_irqrestore(&pa_dbit_lock, flags); return 1; -#endif } -extern spinlock_t pa_dbit_lock; - struct mm_struct; static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte; + unsigned long flags; - spin_lock(&pa_dbit_lock); + spin_lock_irqsave(&pa_dbit_lock, flags); old_pte = *ptep; pte_clear(mm,addr,ptep); - spin_unlock(&pa_dbit_lock); + purge_tlb_entries(mm, addr); + spin_unlock_irqrestore(&pa_dbit_lock, flags); return old_pte; } static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_SMP - unsigned long new, old; - - do { - old = pte_val(*ptep); - new = pte_val(pte_wrprotect(__pte (old))); - } while (cmpxchg((unsigned long *) ptep, old, new) != old); + unsigned long flags; + spin_lock_irqsave(&pa_dbit_lock, flags); + set_pte(ptep, pte_wrprotect(*ptep)); purge_tlb_entries(mm, addr); -#else - pte_t old_pte = *ptep; - set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); -#endif + spin_unlock_irqrestore(&pa_dbit_lock, flags); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 4b12890642eb..83ded26cad06 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) /* Note: purge_tlb_entries can be called at startup with no context. */ - /* Disable preemption while we play with %sr1. */ - preempt_disable(); - mtsp(mm->context, 1); purge_tlb_start(flags); + mtsp(mm->context, 1); pdtlb(addr); pitlb(addr); purge_tlb_end(flags); - preempt_enable(); } EXPORT_SYMBOL(purge_tlb_entries); -- cgit v1.2.3