author     Nicolas Pitre <nico@cam.org>                2007-11-20 17:20:29 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2007-11-26 19:43:58 +0000
commit     b49c0f24cf6744a3f4fd09289fe7cade349dead5 (patch)
tree       23d244eae940f2e36a9bc1a2e77b8f1cc53e071a /arch
parent     aeb747afb3fb1f42d9c82615a103882f7f97f291 (diff)
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It can, however, produce spurious false negatives if a
processor exception occurs in the middle of the operation. Normally
this is not a problem, since cmpxchg is typically called in a loop
until it succeeds, for example to implement an atomic increment.
Some use cases which don't involve a loop require the operation to be
100% reliable, though. This patch changes the implementation so as to
restart the operation after an exception has occurred in the critical
section, rather than abort it.
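In pseudo-C, the restart policy added to the exception paths amounts to
the following (a minimal sketch; cmpxchg_fixup, saved_pc, critical_start
and critical_end are illustrative names, the real logic lives in the
kuser_cmpxchg_fixup assembly below):

	/* A sketch of the restart policy: saved_pc is the interrupted
	 * user pc, critical_start/critical_end delimit the cmpxchg
	 * critical section. */
	static void cmpxchg_fixup(unsigned long *saved_pc,
				  unsigned long critical_start,
				  unsigned long critical_end)
	{
		/* Interrupted inside the critical section: rewind the pc
		 * so the whole load/compare/store sequence runs again. */
		if (*saved_pc >= critical_start && *saved_pc <= critical_end)
			*saved_pc = critical_start;
	}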
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel, as this test depends on a sufficiently high interrupt rate):
	#include <stdio.h>

	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
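For contrast, a typical looping caller, along the lines of the atomic_add
example in the helper's own documentation, would look like the sketch
below; with the old implementation a spurious failure merely cost such a
caller one extra pass through the loop, which is why it went unnoticed:

	#include <stdio.h>

	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

	/* Retry until the compare-and-swap succeeds (returns 0). */
	static void atomic_add(int *ptr, int val)
	{
		int old;
		do {
			old = *ptr;
		} while (__kernel_cmpxchg(old, old + val, ptr));
	}

	int main(void)
	{
		int x = 0;
		atomic_add(&x, 5);
		printf("x = %d\n", x);
		return 0;
	}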
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch')

-rw-r--r--  arch/arm/kernel/entry-armv.S | 94
-rw-r--r--  arch/arm/kernel/traps.c      |  3

2 files changed, 58 insertions, 39 deletions
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d645897652c2..0d1bbea84df0 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -339,16 +339,6 @@ __pabt_svc:
 	str	r1, [sp]		@ save the "real" r0 copied
 					@ from the exception stack
 
-#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-#ifndef CONFIG_MMU
-#warning "NPTL on non MMU needs fixing"
-#else
-	@ make sure our user space atomic helper is aborted
-	cmp	r2, #TASK_SIZE
-	bichs	r3, r3, #PSR_Z_BIT
-#endif
-#endif
-
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
@@ -372,9 +362,25 @@ __pabt_svc:
 	zero_fp
 	.endm
 
+	.macro	kuser_cmpxchg_check
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#ifndef CONFIG_MMU
+#warning "NPTL on non MMU needs fixing"
+#else
+	@ Make sure our user space atomic helper is restarted
+	@ if it was interrupted in a critical region.  Here we
+	@ perform a quick test inline since it should be false
+	@ 99.9999% of the time.  The rest is done out of line.
+	cmp	r2, #TASK_SIZE
+	blhs	kuser_cmpxchg_fixup
+#endif
+#endif
+	.endm
+
 	.align	5
 __dabt_usr:
 	usr_entry
+	kuser_cmpxchg_check
 
 	@
 	@ Call the processor-specific abort handler:
@@ -404,6 +410,7 @@ __dabt_usr:
 	.align	5
 __irq_usr:
 	usr_entry
+	kuser_cmpxchg_check
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
@@ -669,7 +676,7 @@ __kuser_helper_start:
  *
  * Clobbered:
  *
- *	the Z flag might be lost
+ *	none
  *
  * Definition and user space usage example:
 *
@@ -730,9 +737,6 @@ __kuser_memory_barrier:				@ 0xffff0fa0
 *
 *    - This routine already includes memory barriers as needed.
 *
- *    - A failure might be transient, i.e. it is possible, although unlikely,
- *      that "failure" be returned even if *ptr == oldval.
- *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
@@ -769,46 +773,62 @@ __kuser_cmpxchg:				@ 0xffff0fc0
 
 #elif __LINUX_ARM_ARCH__ < 6
 
+#ifdef CONFIG_MMU
+
 	/*
-	 * Theory of operation:
-	 *
-	 * We set the Z flag before loading oldval. If ever an exception
-	 * occurs we can not be sure the loaded value will still be the same
-	 * when the exception returns, therefore the user exception handler
-	 * will clear the Z flag whenever the interrupted user code was
-	 * actually from the kernel address space (see the usr_entry macro).
-	 *
-	 * The post-increment on the str is used to prevent a race with an
-	 * exception happening just after the str instruction which would
-	 * clear the Z flag although the exchange was done.
+	 * The only thing that can break atomicity in this cmpxchg
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle
+	 * of the critical sequence.  To prevent this, code is added to
+	 * the IRQ and data abort exception handlers to set the pc back
+	 * to the beginning of the critical section if it is found to be
+	 * within that critical section (see kuser_cmpxchg_fixup).
 	 */
-#ifdef CONFIG_MMU
-	teq	ip, ip			@ set Z flag
-	ldr	ip, [r2]		@ load current val
-	add	r3, r2, #1		@ prepare store ptr
-	teqeq	ip, r0			@ compare with oldval if still allowed
-	streq	r1, [r3, #-1]!		@ store newval if still allowed
-	subs	r0, r2, r3		@ if r2 == r3 the str occured
+1:	ldr	r3, [r2]		@ load current val
+	subs	r3, r3, r0		@ compare with oldval
+2:	streq	r1, [r2]		@ store newval if eq
+	rsbs	r0, r3, #0		@ set return val and C flag
+	usr_ret	lr
+
+	.text
+kuser_cmpxchg_fixup:
+	@ Called from kuser_cmpxchg_check macro.
+	@ r2 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
+	subs	r8, r2, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
+	mov	pc, lr
+	.previous
+
 #else
 #warning "NPTL on non MMU needs fixing"
 	mov	r0, #-1
 	adds	r0, r0, #0
-#endif
 	usr_ret	lr
+#endif
 
 #else
 
 #ifdef CONFIG_SMP
 	mcr	p15, 0, r0, c7, c10, 5	@ dmb
 #endif
-	ldrex	r3, [r2]
+1:	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
+	teqeq	r3, #1
+	beq	1b
 	rsbs	r0, r3, #0
+	/* beware -- each __kuser slot must be 8 instructions max */
 #ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
-#endif
+	b	__kuser_memory_barrier
+#else
 	usr_ret	lr
+#endif
 
 #endif
 
@@ -829,7 +849,7 @@ __kuser_cmpxchg:				@ 0xffff0fc0
 *
 * Clobbered:
 *
- *	the Z flag might be lost
+ *	none
 *
 * Definition and user space usage example:
 *
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5a52dd7f97fe..c34db4e868fa 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -509,7 +509,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 	 * existence.  Don't ever use this from user code.
 	 */
 	case 0xfff0:
-	{
+	for (;;) {
 		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
 					 struct pt_regs *regs);
 		unsigned long val;
@@ -545,7 +545,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		up_read(&mm->mmap_sem);
 		/* simulate a write access fault */
 		do_DataAbort(addr, 15 + (1 << 11), regs);
-		return -1;
 	}
 #endif
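The address arithmetic in kuser_cmpxchg_fixup is terse because ARM
immediates must fit in a rotated 8-bit field, so the user-visible address
of label 1 is built in two instructions (mov then sub). In C the range
check is roughly the sketch below; offset_1b and offset_2b are
illustrative names for the assembled values of (1b - __kuser_cmpxchg)
and (2b - 1b):

	/* Sketch of the fixup's range check. */
	static void kuser_cmpxchg_fixup_c(unsigned long *saved_pc,
					  unsigned long offset_1b,
					  unsigned long offset_2b)
	{
		/* The helper is always mapped at 0xffff0fc0 in user space,
		 * so label 1's user address is that base plus its offset. */
		unsigned long start = 0xffff0fc0UL + offset_1b;

		/* One unsigned comparison covers both bounds, since
		 * pc - start underflows to a huge value when pc < start;
		 * this is what the subs/rsbcss pair does with the C flag. */
		if (*saved_pc - start <= offset_2b)
			*saved_pc = start;	/* restart the critical section */
	}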