author     Russell King <rmk+kernel@arm.linux.org.uk>    2010-02-25 22:09:41 +0000
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2010-02-25 22:09:41 +0000
commit     2741ecb4ce5c2d430b5c44b0a169038338c21df5
tree       4aa71d7551184ee88f32c7f3660d821133058c32  /arch/arm/include
parent     bc85e585c6d0fab4bde12d60964b2f25802c3163
parent     5de813b6cd06460b337f9da9afe316823cf3ef45
Merge branch 'misc2' into devel
Diffstat (limited to 'arch/arm/include')
-rw-r--r--   arch/arm/include/asm/atomic.h         | 228
-rw-r--r--   arch/arm/include/asm/io.h             |  11
-rw-r--r--   arch/arm/include/asm/mach/time.h      |   8
-rw-r--r--   arch/arm/include/asm/memory.h         |  23
-rw-r--r--   arch/arm/include/asm/mmu.h            |   1
-rw-r--r--   arch/arm/include/asm/mmu_context.h    |  15
-rw-r--r--   arch/arm/include/asm/pgtable-nommu.h  |   4
-rw-r--r--   arch/arm/include/asm/setup.h          |  12
-rw-r--r--   arch/arm/include/asm/spinlock.h       |  36
-rw-r--r--   arch/arm/include/asm/system.h         |   3
-rw-r--r--   arch/arm/include/asm/thread_info.h    |   3
11 files changed, 294 insertions, 50 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index d0daeab2234e..e8ddec2cb158 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()    smp_mb()
 #define smp_mb__after_atomic_inc()     smp_mb()
 
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+       u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+       u64 result;
+
+       __asm__ __volatile__("@ atomic64_read\n"
+"      ldrexd  %0, %H0, [%1]"
+       : "=&r" (result)
+       : "r" (&v->counter)
+       );
+
+       return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+       u64 tmp;
+
+       __asm__ __volatile__("@ atomic64_set\n"
+"1:    ldrexd  %0, %H0, [%1]\n"
+"      strexd  %0, %2, %H2, [%1]\n"
+"      teq     %0, #0\n"
+"      bne     1b"
+       : "=&r" (tmp)
+       : "r" (&v->counter), "r" (i)
+       : "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+       u64 result;
+       unsigned long tmp;
+
+       __asm__ __volatile__("@ atomic64_add\n"
+"1:    ldrexd  %0, %H0, [%2]\n"
+"      adds    %0, %0, %3\n"
+"      adc     %H0, %H0, %H3\n"
+"      strexd  %1, %0, %H0, [%2]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp)
+       : "r" (&v->counter), "r" (i)
+       : "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+       u64 result;
+       unsigned long tmp;
+
+       smp_mb();
+
+       __asm__ __volatile__("@ atomic64_add_return\n"
+"1:    ldrexd  %0, %H0, [%2]\n"
+"      adds    %0, %0, %3\n"
+"      adc     %H0, %H0, %H3\n"
+"      strexd  %1, %0, %H0, [%2]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp)
+       : "r" (&v->counter), "r" (i)
+       : "cc");
+
+       smp_mb();
+
+       return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+       u64 result;
+       unsigned long tmp;
+
+       __asm__ __volatile__("@ atomic64_sub\n"
+"1:    ldrexd  %0, %H0, [%2]\n"
+"      subs    %0, %0, %3\n"
+"      sbc     %H0, %H0, %H3\n"
+"      strexd  %1, %0, %H0, [%2]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp)
+       : "r" (&v->counter), "r" (i)
+       : "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+       u64 result;
+       unsigned long tmp;
+
+       smp_mb();
+
+       __asm__ __volatile__("@ atomic64_sub_return\n"
+"1:    ldrexd  %0, %H0, [%2]\n"
+"      subs    %0, %0, %3\n"
+"      sbc     %H0, %H0, %H3\n"
+"      strexd  %1, %0, %H0, [%2]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp)
+       : "r" (&v->counter), "r" (i)
+       : "cc");
+
+       smp_mb();
+
+       return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+       u64 oldval;
+       unsigned long res;
+
+       smp_mb();
+
+       do {
+               __asm__ __volatile__("@ atomic64_cmpxchg\n"
+               "ldrexd         %1, %H1, [%2]\n"
+               "mov            %0, #0\n"
+               "teq            %1, %3\n"
+               "teqeq          %H1, %H3\n"
+               "strexdeq       %0, %4, %H4, [%2]"
+               : "=&r" (res), "=&r" (oldval)
+               : "r" (&ptr->counter), "r" (old), "r" (new)
+               : "cc");
+       } while (res);
+
+       smp_mb();
+
+       return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+       u64 result;
+       unsigned long tmp;
+
+       smp_mb();
+
+       __asm__ __volatile__("@ atomic64_xchg\n"
+"1:    ldrexd  %0, %H0, [%2]\n"
+"      strexd  %1, %3, %H3, [%2]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp)
+       : "r" (&ptr->counter), "r" (new)
+       : "cc");
+
+       smp_mb();
+
+       return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+       u64 result;
+       unsigned long tmp;
+
+       smp_mb();
+
+       __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1:    ldrexd  %0, %H0, [%2]\n"
+"      subs    %0, %0, #1\n"
+"      sbc     %H0, %H0, #0\n"
+"      teq     %H0, #0\n"
+"      bmi     2f\n"
+"      strexd  %1, %0, %H0, [%2]\n"
+"      teq     %1, #0\n"
+"      bne     1b\n"
+"2:"
+       : "=&r" (result), "=&r" (tmp)
+       : "r" (&v->counter)
+       : "cc");
+
+       smp_mb();
+
+       return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+       u64 val;
+       unsigned long tmp;
+       int ret = 1;
+
+       smp_mb();
+
+       __asm__ __volatile__("@ atomic64_add_unless\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      teq     %0, %4\n"
+"      teqeq   %H0, %H4\n"
+"      moveq   %1, #0\n"
+"      beq     2f\n"
+"      adds    %0, %0, %5\n"
+"      adc     %H0, %H0, %H5\n"
+"      strexd  %2, %0, %H0, [%3]\n"
+"      teq     %2, #0\n"
+"      bne     1b\n"
+"2:"
+       : "=&r" (val), "=&r" (ret), "=&r" (tmp)
+       : "r" (&v->counter), "r" (u), "r" (a)
+       : "cc");
+
+       if (ret)
+               smp_mb();
+
+       return ret;
+}
+
+#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)                        atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)         atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)                        atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
+
 #include <asm-generic/atomic-long.h>
 #endif
 #endif
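The block above adds true 64-bit atomics: every operation, including atomic64_read(), goes through an ldrexd/strexd exclusive pair, because a plain 64-bit load or store is not single-copy atomic on 32-bit ARM. Below is a minimal usage sketch; the counter and helper names are invented for illustration and are not part of this patch.

/* Sketch only: an invented 64-bit statistics counter using the new API. */
#include <linux/types.h>
#include <asm/atomic.h>

static atomic64_t bytes_rx = ATOMIC64_INIT(0);

static inline void account_rx(u64 len)
{
        atomic64_add(len, &bytes_rx);           /* one ldrexd/strexd retry loop */
}

static inline u64 snapshot_rx(void)
{
        return atomic64_read(&bytes_rx);        /* atomic even on 32-bit ARM */
}

Note how the barrier-free atomic64_add() suits plain counters, while the _return and cmpxchg variants issue smp_mb() on both sides for ordering, mirroring the existing 32-bit atomics.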
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cfc30ce..c980156f3263 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 /*
  * __arm_ioremap takes CPU physical address.
  * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+       size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+       void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
 extern void __iounmap(volatile void __iomem *addr);
 
 /*
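The new _caller variants thread a return address through to the vmalloc bookkeeping, so /proc/vmallocinfo can attribute a mapping to the function that created it rather than to the common ioremap code; hence the note that they should only be used from non-inline functions. A sketch of a wrapper passing its own call site (the wrapper name is invented; MT_DEVICE is the usual memory type for device mappings):

/* Sketch: forwarding the call site to the new _caller variant. */
#include <asm/io.h>
#include <asm/mach/map.h>       /* MT_DEVICE */

void __iomem *my_ioremap(unsigned long phys_addr, size_t size)
{
        return __arm_ioremap_caller(phys_addr, size, MT_DEVICE,
                                    __builtin_return_address(0));
}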
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index b2cc1fcd0400..8bffc3ff3acf 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -46,12 +46,4 @@ struct sys_timer {
 extern struct sys_timer *system_timer;
 extern void timer_tick(void);
 
-/*
- * Kernel time keeping support.
- */
-struct timespec;
-extern int (*set_rtc)(void);
-extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
-extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
-
 #endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 5421d82a2572..4312ee5e3d0b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -76,6 +76,17 @@
  */
 #define IOREMAP_MAX_ORDER      24
 
+/*
+ * Size of DMA-consistent memory region.  Must be multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE    SZ_2M
+#endif
+
+#define CONSISTENT_END         (0xffe00000UL)
+#define CONSISTENT_BASE                (CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
 #else /* CONFIG_MMU */
 
 /*
@@ -93,11 +104,11 @@
 #endif
 
 #ifndef PHYS_OFFSET
-#define PHYS_OFFSET            (CONFIG_DRAM_BASE)
+#define PHYS_OFFSET            UL(CONFIG_DRAM_BASE)
 #endif
 
 #ifndef END_MEM
-#define END_MEM                        (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#define END_MEM                        (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
@@ -113,14 +124,6 @@
 #endif /* !CONFIG_MMU */
 
 /*
- * Size of DMA-consistent memory region.  Must be multiple of 2M,
- * between 2MB and 14MB inclusive.
- */
-#ifndef CONSISTENT_DMA_SIZE
-#define CONSISTENT_DMA_SIZE SZ_2M
-#endif
-
-/*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584d04a1..68870c776671 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
        unsigned int id;
+       spinlock_t id_lock;
 #endif
        unsigned int kvm_seq;
 } mm_context_t;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329dd..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION     (1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
 static inline void check_context(struct mm_struct *mm)
 {
+       /*
+        * This code is executed with interrupts enabled. Therefore,
+        * mm->context.id cannot be updated to the latest ASID version
+        * on a different CPU (and condition below not triggered)
+        * without first getting an IPI to reset the context. The
+        * alternative is to take a read_lock on mm->context.id_lock
+        * (after changing its type to rwlock_t).
+        */
        if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
                __new_context(mm);
 
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
                __flush_icache_all();
 #endif
        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+               struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+               *crt_mm = next;
+#endif
                check_context(next);
                cpu_switch_mm(next->pgd, next);
                if (cache_is_vivt())
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index b011f2e939aa..013cfcdc4839 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -86,8 +86,8 @@ extern unsigned int kobjsize(const void *objp);
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
  */
-#define        VMALLOC_START   0
-#define        VMALLOC_END     0xffffffff
+#define        VMALLOC_START   0UL
+#define        VMALLOC_END     0xffffffffUL
 
 #define FIRST_USER_ADDRESS      (0)
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 5ccce0a9b03c..f392fb4437af 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -223,18 +223,6 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)    ((bank)->start + (bank)->size)
 #define bank_phys_size(bank)   (bank)->size
 
-/*
- * Early command line parameters.
- */
-struct early_params {
-       const char *arg;
-       void (*fn)(char **p);
-};
-
-#define __early_param(name,fn)                                 \
-static struct early_params __early_##fn __used                 \
-__attribute__((__section__(".early_param.init"))) = { name, fn }
-
 #endif /*  __KERNEL__ */
 
 #endif
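The __early_param machinery removed above was ARM's private hook for early command-line options, collected in a dedicated .early_param.init section; its users are converted (elsewhere in this series, not in these headers) to the kernel's generic early_param() mechanism. A sketch of the generic form, with an invented option name:

/* Sketch: the generic replacement for __early_param; option name invented. */
#include <linux/kernel.h>       /* memparse() */
#include <linux/init.h>

static unsigned long example_size;

static int __init parse_example_size(char *p)
{
        example_size = memparse(p, &p);
        return 0;
}
early_param("example_size", parse_example_size);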
- */ -struct early_params { - const char *arg; - void (*fn)(char **p); -}; - -#define __early_param(name,fn) \ -static struct early_params __early_##fn __used \ -__attribute__((__section__(".early_param.init"))) = { name, fn } - #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index c91c64cab922..17eb355707dd 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,6 +5,22 @@ #error SMP not supported on pre-ARMv6 CPUs #endif +static inline void dsb_sev(void) +{ +#if __LINUX_ARM_ARCH__ >= 7 + __asm__ __volatile__ ( + "dsb\n" + "sev" + ); +#elif defined(CONFIG_CPU_32v6K) + __asm__ __volatile__ ( + "mcr p15, 0, %0, c7, c10, 4\n" + "sev" + : : "r" (0) + ); +#endif +} + /* * ARMv6 Spin-locking. * @@ -69,13 +85,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) __asm__ __volatile__( " str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev" -#endif : : "r" (&lock->lock), "r" (0) : "cc"); + + dsb_sev(); } /* @@ -132,13 +146,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) __asm__ __volatile__( "str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev\n" -#endif : : "r" (&rw->lock), "r" (0) : "cc"); + + dsb_sev(); } /* write_can_lock - would write_trylock() succeed? */ @@ -188,14 +200,12 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) " strex %1, %0, [%2]\n" " teq %1, #0\n" " bne 1b" -#ifdef CONFIG_CPU_32v6K -"\n cmp %0, #0\n" -" mcreq p15, 0, %0, c7, c10, 4\n" -" seveq" -#endif : "=&r" (tmp), "=&r" (tmp2) : "r" (&rw->lock) : "cc"); + + if (tmp == 0) + dsb_sev(); } static inline int arch_read_trylock(arch_rwlock_t *rw) diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 058e7e90881d..ca88e6a84707 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -73,8 +73,7 @@ extern unsigned int mem_fclk_21285; struct pt_regs; -void die(const char *msg, struct pt_regs *regs, int err) - __attribute__((noreturn)); +void die(const char *msg, struct pt_regs *regs, int err); struct siginfo; void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 2dfb7d7a66e9..b74970ec02c4 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *); extern void iwmmxt_task_release(struct thread_info *); extern void iwmmxt_task_switch(struct thread_info *); -extern void vfp_sync_state(struct thread_info *thread); +extern void vfp_sync_hwstate(struct thread_info *); +extern void vfp_flush_hwstate(struct thread_info *); #endif |