path: root/include/asm-x86/mutex_64.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 08:25:51 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 08:25:51 -0700
commit	9e9abecfc0ff3a9ad2ead954b37bbfcb863c775e (patch)
tree	0c3ffda953b82750638a06507591ad587b565ff2 /include/asm-x86/mutex_64.h
parent	d7bb545d86825e635cab33a1dd81ca0ad7b92887 (diff)
parent	77ad386e596c6b0930cc2e09e3cce485e3ee7f72 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (613 commits)
  x86: standalone trampoline code
  x86: move suspend wakeup code to C
  x86: coding style fixes to arch/x86/kernel/acpi/sleep.c
  x86: setup_trampoline() - fix section mismatch warning
  x86: section mismatch fixes, #1
  x86: fix paranoia about using BIOS quickboot mechanism.
  x86: print out buggy mptable
  x86: use cpu_online()
  x86: use cpumask_of_cpu()
  x86: remove unnecessary tmp local variable
  x86: remove unnecessary memset()
  x86: use ioapic_read_entry() and ioapic_write_entry()
  x86: avoid redundant loop in io_apic_level_ack_pending()
  x86: remove superfluous initialisation in boot code.
  x86: merge mpparse_{32,64}.c
  x86: unify mp_register_gsi
  x86: unify mp_config_acpi_legacy_irqs
  x86: unify mp_register_ioapic
  x86: unify uniq_io_apic_id
  x86: unify smp_scan_config
  ...
Diffstat (limited to 'include/asm-x86/mutex_64.h')
-rw-r--r--	include/asm-x86/mutex_64.h	| 73
1 file changed, 34 insertions(+), 39 deletions(-)
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
index 6c2949a3c677..f3fae9becb38 100644
--- a/include/asm-x86/mutex_64.h
+++ b/include/asm-x86/mutex_64.h
@@ -16,23 +16,21 @@
*
* Atomically decrements @v and calls <fail_fn> if the result is negative.
*/
-#define __mutex_fastpath_lock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " decl (%%rdi) \n" \
- " jns 1f \n" \
- " call "#fail_fn" \n" \
- "1:" \
- \
- :"=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
+#define __mutex_fastpath_lock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
+ " jns 1f \n" \
+ " call " #fail_fn "\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
} while (0)
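At the C level, the fastpath above is equivalent to the sketch below; it mirrors the generic fallback in asm-generic/mutex-dec.h, and the helper name mutex_fastpath_lock_c is illustrative only (atomic_t, atomic_dec_return() and unlikely() come from the kernel headers):

/*
 * Illustrative C equivalent of __mutex_fastpath_lock():
 * "lock decl" decrements the count and "jns" skips the call
 * while the result is still non-negative (the lock was free).
 */
static inline void mutex_fastpath_lock_c(atomic_t *v,
					 void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(v) < 0))
		fail_fn(v);	/* count went negative: contended */
}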
/**
@@ -45,9 +43,8 @@ do { \
* it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns
*/
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count,
- int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
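This retval variant is the one used by callers that must be able to fail; paraphrasing kernel/mutex.c of that era (a simplified sketch, not the verbatim source):

/* Simplified caller-side sketch, after kernel/mutex.c */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval(&lock->count,
					__mutex_lock_interruptible_slowpath);
}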
@@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count,
*
* Atomically increments @v and calls <fail_fn> if the result is nonpositive.
*/
-#define __mutex_fastpath_unlock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " incl (%%rdi) \n" \
- " jg 1f \n" \
- " call "#fail_fn" \n" \
- "1: " \
- \
- :"=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
+#define __mutex_fastpath_unlock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
+ " jg 1f\n" \
+ " call " #fail_fn "\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
} while (0)
#define __mutex_slowpath_needs_to_unlock() 1
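As on the lock side, the unlock fastpath has a direct C equivalent (again mirroring asm-generic/mutex-dec.h; the helper name is illustrative): "lock incl" increments the count and "jg" skips the slowpath call only when the result is positive, i.e. there are no waiters to wake.

/* Illustrative C equivalent of __mutex_fastpath_unlock() */
static inline void mutex_fastpath_unlock_c(atomic_t *v,
					   void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_inc_return(v) <= 0))
		fail_fn(v);	/* count still nonpositive: waiters exist */
}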
@@ -93,8 +88,8 @@ do { \
* if it wasn't 1 originally. [the fallback function is never used on
* x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
*/
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
if (likely(atomic_cmpxchg(count, 1, 0) == 1))
return 1;
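The trylock fastpath needs no assembly at all: atomic_cmpxchg() swaps the count from 1 (unlocked) to 0 (locked) only if it is still 1, and the returned old value tells the caller whether the swap happened. Paraphrasing the caller in kernel/mutex.c of that era (a sketch; fail_fn is passed but, per the comment above, never used on x86-64):

/* Simplified caller-side sketch, after kernel/mutex.c */
int __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}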