From 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@ppc970.osdl.org>
Date: Sat, 16 Apr 2005 15:20:36 -0700
Subject: Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
---
 include/asm-m68k/atomic.h | 148 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 148 insertions(+)
 create mode 100644 include/asm-m68k/atomic.h

diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
new file mode 100644
index 000000000000..38f3043e7fe1
--- /dev/null
+++ b/include/asm-m68k/atomic.h
@@ -0,0 +1,148 @@
+#ifndef __ARCH_M68K_ATOMIC__
+#define __ARCH_M68K_ATOMIC__
+
+#include <linux/config.h>
+
+#include <asm/system.h>	/* local_irq_XXX() */
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc.
+ */
+
+/*
+ * We do not have SMP m68k systems, so we don't have to deal with that.
+ */
+
+typedef struct { int counter; } atomic_t;
+#define ATOMIC_INIT(i)	{ (i) }
+
+#define atomic_read(v)		((v)->counter)
+#define atomic_set(v, i)	(((v)->counter) = i)
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
+}
+
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+	return c != 0;
+}
+
+static inline int atomic_inc_and_test(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+	return c != 0;
+}
+
+#ifdef CONFIG_RMW_INSNS
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int t, tmp;
+
+	__asm__ __volatile__(
+		"1:	movel %2,%1\n"
+		"	addl %3,%1\n"
+		"	casl %2,%1,%0\n"
+		"	jne 1b"
+		: "+m" (*v), "=&d" (t), "=&d" (tmp)
+		: "g" (i), "2" (atomic_read(v)));
+	return t;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int t, tmp;
+
+	__asm__ __volatile__(
+		"1:	movel %2,%1\n"
+		"	subl %3,%1\n"
+		"	casl %2,%1,%0\n"
+		"	jne 1b"
+		: "+m" (*v), "=&d" (t), "=&d" (tmp)
+		: "g" (i), "2" (atomic_read(v)));
+	return t;
+}
+#else /* !CONFIG_RMW_INSNS */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int t;
+
+	local_irq_save(flags);
+	t = atomic_read(v);
+	t += i;
+	atomic_set(v, t);
+	local_irq_restore(flags);
+
+	return t;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int t;
+
+	local_irq_save(flags);
+	t = atomic_read(v);
+	t -= i;
+	atomic_set(v, t);
+	local_irq_restore(flags);
+
+	return t;
+}
+#endif /* !CONFIG_RMW_INSNS */
+
+#define atomic_dec_return(v)	atomic_sub_return(1, (v))
+#define atomic_inc_return(v)	atomic_add_return(1, (v))
+
+static inline int atomic_sub_and_test(int i, atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v) : "g" (i));
+	return c != 0;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v) : "g" (i));
+	return c != 0;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
+{
+	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+}
+
+static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
+{
+	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+}
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif /* __ARCH_M68K_ATOMIC__ */