Diffstat (limited to 'arch/arm/include/asm')
34 files changed, 294 insertions, 246 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 3c4596d0ce6c..30b3bc1666d2 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
@@ -20,7 +21,6 @@ generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
 generic-y += rwsem.h
-generic-y += scatterlist.h
 generic-y += seccomp.h
 generic-y += sections.h
 generic-y += segment.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 186270b3e194..4abe57279c66 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -178,6 +178,21 @@
 	.endm

 /*
+ * Assembly version of "adr rd, BSYM(sym)". This should only be used to
+ * reference local symbols in the same assembly file which are to be
+ * resolved by the assembler. Other usage is undefined.
+ */
+	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+	.macro	badr\c, rd, sym
+#ifdef CONFIG_THUMB2_KERNEL
+	adr\c	\rd, \sym + 1
+#else
+	adr\c	\rd, \sym
+#endif
+	.endm
+	.endr
+
+/*
  * Get current thread_info.
  */
 	.macro	get_thread_info, rd
@@ -326,7 +341,7 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	bne	1f
 	orr	\reg, \reg, #PSR_A_BIT
-	adr	lr, BSYM(2f)
+	badr	lr, 2f
 	msr	spsr_cxsf, \reg
 	__MSR_ELR_HYP(14)
 	__ERET
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index d2f81e6b8c1c..6c2327e1c732 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -81,7 +81,7 @@ do {									\
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)

-#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)

 #define smp_mb__before_atomic()	smp_mb()
 #define smp_mb__after_atomic()	smp_mb()
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 2d46862e7bef..4812cda8fd17 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -482,10 +482,17 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
 	: : : "r0","r1","r2","r3","r4","r5","r6","r7",	\
 	      "r9","r10","lr","memory" )

+#ifdef CONFIG_MMU
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
+#else
+static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+#endif

 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
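The set_mb() → smp_store_mb() change in barrier.h tracks a tree-wide rename; as a hedged usage sketch (hypothetical surrounding context, not from this patch), the macro exists for the store-then-test pattern where a store must be ordered before a subsequent load:

	/*
	 * smp_store_mb() = WRITE_ONCE() + smp_mb(): the compiler may not
	 * tear or reorder the store, and on SMP it is ordered before the
	 * signal_pending() load below.
	 */
	smp_store_mb(current->state, TASK_INTERRUPTIBLE);
	if (!signal_pending(current))		/* must stay after the store */
		schedule();
	__set_current_state(TASK_RUNNING);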
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index abb2c3769b01..1692a05d3207 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -94,6 +94,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 		break;
 #endif
 	default:
+		/* Cause a link-time error, the xchg() size is not supported */
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
@@ -102,8 +103,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	return ret;
 }

-#define xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, x) ({							\
+	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
+				   sizeof(*(ptr)));			\
+})

 #include <asm-generic/cmpxchg-local.h>

@@ -118,14 +121,16 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
  */
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) ({					\
+	(__typeof(*ptr))__cmpxchg_local_generic((ptr),			\
+						(unsigned long)(o),	\
+						(unsigned long)(n),	\
+						sizeof(*(ptr)));	\
+})
+
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

-#ifndef CONFIG_SMP
 #include <asm-generic/cmpxchg.h>
-#endif

 #else	/* min ARCH >= ARMv6 */

@@ -201,11 +206,12 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	return ret;
 }

-#define cmpxchg(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
-					  (unsigned long)(o),		\
-					  (unsigned long)(n),		\
-					  sizeof(*(ptr))))
+#define cmpxchg(ptr,o,n) ({						\
+	(__typeof__(*(ptr)))__cmpxchg_mb((ptr),				\
+					 (unsigned long)(o),		\
+					 (unsigned long)(n),		\
+					 sizeof(*(ptr)));		\
+})

 static inline unsigned long __cmpxchg_local(volatile void *ptr,
 					    unsigned long old,
@@ -227,6 +233,13 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	return ret;
 }

+#define cmpxchg_local(ptr, o, n) ({					\
+	(__typeof(*ptr))__cmpxchg_local((ptr),				\
+					(unsigned long)(o),		\
+					(unsigned long)(n),		\
+					sizeof(*(ptr)));		\
+})
+
 static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 					     unsigned long long old,
 					     unsigned long long new)
@@ -252,6 +265,14 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 	return oldval;
 }

+#define cmpxchg64_relaxed(ptr, o, n) ({					\
+	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n));	\
+})
+
+#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+
 static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
 						unsigned long long old,
 						unsigned long long new)
@@ -265,23 +286,11 @@ static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
 	return ret;
 }

-#define cmpxchg_local(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
-					     (unsigned long)(o),	\
-					     (unsigned long)(n),	\
-					     sizeof(*(ptr))))
-
-#define cmpxchg64(ptr, o, n)						\
-	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#define cmpxchg64_relaxed(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) ({						\
+	(__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					   (unsigned long long)(o),	\
+					   (unsigned long long)(n));	\
+})

 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 99084431d6ae..bb4fa67da541 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -19,7 +19,7 @@
  * It should not be re-used except for that purpose.
  */
 #include <linux/spinlock.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>

 #include <mach/isa-dma.h>
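The conversions to `({ ... })` statement expressions keep each macro a single expression (so call sites are unchanged) while allowing the multi-line layout above. A hedged usage sketch with hypothetical names:

	/*
	 * Hedged usage sketch (not from this patch): lock-free claim of an
	 * owner word.  cmpxchg() returns the prior value of *ptr, so
	 * equality with the expected old value means the swap happened.
	 */
	static unsigned long owner;	/* hypothetical: 0 means "free" */

	static bool claim_owner(unsigned long me)
	{
		return cmpxchg(&owner, 0UL, me) == 0UL;
	}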
diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h
index 0df7a2c1fc3d..5189fa819b60 100644
--- a/arch/arm/include/asm/edac.h
+++ b/arch/arm/include/asm/edac.h
@@ -18,11 +18,12 @@
 #define ASM_EDAC_H
 /*
  * ECC atomic, DMA, SMP and interrupt safe scrub function.
- * Implements the per arch atomic_scrub() that EDAC use for software
+ * Implements the per arch edac_atomic_scrub() that EDAC use for software
  * ECC scrubbing. It reads memory and then writes back the original
  * value, allowing the hardware to detect and correct memory errors.
  */
-static inline void atomic_scrub(void *va, u32 size)
+
+static inline void edac_atomic_scrub(void *va, u32 size)
 {
 #if __LINUX_ARM_ARCH__ >= 6
 	unsigned int *virt_addr = va;
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index 469a2b30fa27..609184f522ee 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -10,7 +10,7 @@
 	@
 	@ routine called with r0 = irq number, r1 = struct pt_regs *
 	@
-	adrne	lr, BSYM(1b)
+	badrne	lr, 1b
 	bne	asm_do_IRQ

 #ifdef CONFIG_SMP
@@ -23,7 +23,7 @@
 	ALT_SMP(test_for_ipi r0, r2, r6, lr)
 	ALT_UP_B(9997f)
 	movne	r1, sp
-	adrne	lr, BSYM(1b)
+	badrne	lr, 1b
 	bne	do_IPI
 #endif
 9997:
diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h
index 89aefe10d66b..34c1d96ef46d 100644
--- a/arch/arm/include/asm/firmware.h
+++ b/arch/arm/include/asm/firmware.h
@@ -34,6 +34,10 @@ struct firmware_ops {
 	 */
 	int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr);
 	/*
+	 * Gets boot address of specified physical CPU
+	 */
+	int (*get_cpu_boot_addr)(int cpu, unsigned long *boot_addr);
+	/*
 	 * Boots specified physical CPU
 	 */
 	int (*cpu_boot)(int cpu);
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 4e78065a16aa..5eed82809d82 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -93,6 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;

+	preempt_disable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -104,6 +105,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "cc", "memory");

 	*uval = val;
+	preempt_enable();
+
 	return ret;
 }

@@ -124,7 +127,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;

-	pagefault_disable();	/* implies preempt_disable() */
+#ifndef CONFIG_SMP
+	preempt_disable();
+#endif
+	pagefault_disable();

 	switch (op) {
 	case FUTEX_OP_SET:
@@ -146,7 +152,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 		ret = -ENOSYS;
 	}

-	pagefault_enable();	/* subsumes preempt_enable() */
+	pagefault_enable();
+#ifndef CONFIG_SMP
+	preempt_enable();
+#endif

 	if (!ret) {
 		switch (cmp) {
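For the new firmware hook, a hedged sketch of a consumer (the fallback path is hypothetical; call_firmware_op() is the checking wrapper this header already provides, returning -ENOSYS when a platform leaves an op unimplemented):

	/* Hypothetical caller: read back the boot address set for a
	 * secondary CPU, falling back to the kernel's own entry point. */
	unsigned long boot_addr;

	if (call_firmware_op(get_cpu_boot_addr, cpu, &boot_addr) == -ENOSYS)
		boot_addr = virt_to_phys(secondary_startup);	/* fallback, hypothetical */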
diff --git a/arch/arm/include/asm/hardware/arm_timer.h b/arch/arm/include/asm/hardware/arm_timer.h
deleted file mode 100644
index d6030ff599db..000000000000
--- a/arch/arm/include/asm/hardware/arm_timer.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H
-#define __ASM_ARM_HARDWARE_ARM_TIMER_H
-
-/*
- * ARM timer implementation, found in Integrator, Versatile and Realview
- * platforms.  Not all platforms support all registers and bits in these
- * registers, so we mark them with A for Integrator AP, C for Integrator
- * CP, V for Versatile and R for Realview.
- *
- * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview
- * can have 16-bit or 32-bit selectable via a bit in the control register.
- *
- * Every SP804 contains two identical timers.
- */
-#define TIMER_1_BASE	0x00
-#define TIMER_2_BASE	0x20
-
-#define TIMER_LOAD	0x00			/* ACVR rw */
-#define TIMER_VALUE	0x04			/* ACVR ro */
-#define TIMER_CTRL	0x08			/* ACVR rw */
-#define TIMER_CTRL_ONESHOT	(1 << 0)	/*  CVR */
-#define TIMER_CTRL_32BIT	(1 << 1)	/*  CVR */
-#define TIMER_CTRL_DIV1		(0 << 2)	/* ACVR */
-#define TIMER_CTRL_DIV16	(1 << 2)	/* ACVR */
-#define TIMER_CTRL_DIV256	(2 << 2)	/* ACVR */
-#define TIMER_CTRL_IE		(1 << 5)	/*   VR */
-#define TIMER_CTRL_PERIODIC	(1 << 6)	/* ACVR */
-#define TIMER_CTRL_ENABLE	(1 << 7)	/* ACVR */
-
-#define TIMER_INTCLR	0x0c			/* ACVR wo */
-#define TIMER_RIS	0x10			/*  CVR ro */
-#define TIMER_MIS	0x14			/*  CVR ro */
-#define TIMER_BGLOAD	0x18			/* CVR rw */
-#endif
diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h
deleted file mode 100644
index bb28af7c32de..000000000000
--- a/arch/arm/include/asm/hardware/timer-sp.h
+++ /dev/null
@@ -1,23 +0,0 @@
-struct clk;
-
-void __sp804_clocksource_and_sched_clock_init(void __iomem *,
-					      const char *, struct clk *, int);
-void __sp804_clockevents_init(void __iomem *, unsigned int,
-			      struct clk *, const char *);
-
-static inline void sp804_clocksource_init(void __iomem *base, const char *name)
-{
-	__sp804_clocksource_and_sched_clock_init(base, name, NULL, 0);
-}
-
-static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
-							   const char *name)
-{
-	__sp804_clocksource_and_sched_clock_init(base, name, NULL, 1);
-}
-
-static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name)
-{
-	__sp804_clockevents_init(base, irq, NULL, name);
-
-}
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index 1f1b1cd112f3..7d26f6c4f0f5 100644
--- a/arch/arm/include/asm/hugetlb.h
+++ b/arch/arm/include/asm/hugetlb.h
@@ -53,10 +53,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }

-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
 static inline int huge_pte_none(pte_t pte)
 {
 	return pte_none(pte);
@@ -67,15 +63,6 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 	return pte_wrprotect(pte);
 }

-static inline int arch_prepare_hugepage(struct page *page)
-{
-	return 0;
-}
-
-static inline void arch_release_hugepage(struct page *page)
-{
-}
-
 static inline void arch_clear_hugepage_flags(struct page *page)
 {
 	clear_bit(PG_dcache_clean, &page->flags);
volatile("ldrh %1, %0" - : "+Q" (*(volatile u16 __force *)addr), - "=r" (val)); + asm volatile("ldrh %0, %1" + : "=r" (val) + : "Q" (*(volatile u16 __force *)addr)); return val; } #endif @@ -92,25 +92,23 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) static inline void __raw_writeb(u8 val, volatile void __iomem *addr) { asm volatile("strb %1, %0" - : "+Qo" (*(volatile u8 __force *)addr) - : "r" (val)); + : : "Qo" (*(volatile u8 __force *)addr), "r" (val)); } #define __raw_writel __raw_writel static inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %1, %0" - : "+Qo" (*(volatile u32 __force *)addr) - : "r" (val)); + : : "Qo" (*(volatile u32 __force *)addr), "r" (val)); } #define __raw_readb __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; - asm volatile("ldrb %1, %0" - : "+Qo" (*(volatile u8 __force *)addr), - "=r" (val)); + asm volatile("ldrb %0, %1" + : "=r" (val) + : "Qo" (*(volatile u8 __force *)addr)); return val; } @@ -118,9 +116,9 @@ static inline u8 __raw_readb(const volatile void __iomem *addr) static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; - asm volatile("ldr %1, %0" - : "+Qo" (*(volatile u32 __force *)addr), - "=r" (val)); + asm volatile("ldr %0, %1" + : "=r" (val) + : "Qo" (*(volatile u32 __force *)addr)); return val; } @@ -142,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) * The _caller variety takes a __builtin_return_address(0) value for * /proc/vmalloc to use - and should only be used in non-inline functions. */ -extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, - size_t, unsigned int, void *); extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int, void *); - extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int); extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached); extern void __iounmap(volatile void __iomem *addr); -extern void __arm_iounmap(volatile void __iomem *addr); extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); @@ -319,24 +312,95 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define writesw(p,d,l) __raw_writesw(p,d,l) #define writesl(p,d,l) __raw_writesl(p,d,l) +#ifndef __ARMBE__ +static inline void memset_io(volatile void __iomem *dst, unsigned c, + size_t count) +{ + extern void mmioset(void *, unsigned int, size_t); + mmioset((void __force *)dst, c, count); +} +#define memset_io(dst,c,count) memset_io(dst,c,count) + +static inline void memcpy_fromio(void *to, const volatile void __iomem *from, + size_t count) +{ + extern void mmiocpy(void *, const void *, size_t); + mmiocpy(to, (const void __force *)from, count); +} +#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count) + +static inline void memcpy_toio(volatile void __iomem *to, const void *from, + size_t count) +{ + extern void mmiocpy(void *, const void *, size_t); + mmiocpy((void __force *)to, from, count); +} +#define memcpy_toio(to,from,count) memcpy_toio(to,from,count) + +#else #define memset_io(c,v,l) _memset_io(c,(v),(l)) #define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l)) #define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l)) +#endif #endif /* readl */ /* - * ioremap and friends. + * ioremap() and friends. + * + * ioremap() takes a resource address, and size. 
Due to the ARM memory + * types, it is important to use the correct ioremap() function as each + * mapping has specific properties. + * + * Function Memory type Cacheability Cache hint + * ioremap() Device n/a n/a + * ioremap_nocache() Device n/a n/a + * ioremap_cache() Normal Writeback Read allocate + * ioremap_wc() Normal Non-cacheable n/a + * ioremap_wt() Normal Non-cacheable n/a + * + * All device mappings have the following properties: + * - no access speculation + * - no repetition (eg, on return from an exception) + * - number, order and size of accesses are maintained + * - unaligned accesses are "unpredictable" + * - writes may be delayed before they hit the endpoint device * - * ioremap takes a PCI memory address, as specified in - * Documentation/io-mapping.txt. + * ioremap_nocache() is the same as ioremap() as there are too many device + * drivers using this for device registers, and documentation which tells + * people to use it for such for this to be any different. This is not a + * safe fallback for memory-like mappings, or memory regions where the + * compiler may generate unaligned accesses - eg, via inlining its own + * memcpy. * + * All normal memory mappings have the following properties: + * - reads can be repeated with no side effects + * - repeated reads return the last value written + * - reads can fetch additional locations without side effects + * - writes can be repeated (in certain cases) with no side effects + * - writes can be merged before accessing the target + * - unaligned accesses can be supported + * - ordering is not guaranteed without explicit dependencies or barrier + * instructions + * - writes may be delayed before they hit the endpoint memory + * + * The cache hint is only a performance hint: CPUs may alias these hints. + * Eg, a CPU not implementing read allocate but implementing write allocate + * will provide a write allocate mapping instead. 
*/ -#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) -#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) -#define iounmap __arm_iounmap +void __iomem *ioremap(resource_size_t res_cookie, size_t size); +#define ioremap ioremap +#define ioremap_nocache ioremap + +void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size); +#define ioremap_cache ioremap_cache + +void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size); +#define ioremap_wc ioremap_wc +#define ioremap_wt ioremap_wc + +void iounmap(volatile void __iomem *iomem_cookie); +#define iounmap iounmap /* * io{read,write}{16,32}be() macros diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h index 3b763d6652a0..43908146a5cf 100644 --- a/arch/arm/include/asm/irqflags.h +++ b/arch/arm/include/asm/irqflags.h @@ -20,6 +20,7 @@ #if __LINUX_ARM_ARCH__ >= 6 +#define arch_local_irq_save arch_local_irq_save static inline unsigned long arch_local_irq_save(void) { unsigned long flags; @@ -31,6 +32,7 @@ static inline unsigned long arch_local_irq_save(void) return flags; } +#define arch_local_irq_enable arch_local_irq_enable static inline void arch_local_irq_enable(void) { asm volatile( @@ -40,6 +42,7 @@ static inline void arch_local_irq_enable(void) : "memory", "cc"); } +#define arch_local_irq_disable arch_local_irq_disable static inline void arch_local_irq_disable(void) { asm volatile( @@ -56,6 +59,7 @@ static inline void arch_local_irq_disable(void) /* * Save the current interrupt enable state & disable IRQs */ +#define arch_local_irq_save arch_local_irq_save static inline unsigned long arch_local_irq_save(void) { unsigned long flags, temp; @@ -73,6 +77,7 @@ static inline unsigned long arch_local_irq_save(void) /* * Enable IRQs */ +#define arch_local_irq_enable arch_local_irq_enable static inline void arch_local_irq_enable(void) { unsigned long temp; @@ -88,6 +93,7 @@ static inline void arch_local_irq_enable(void) /* * Disable IRQs */ +#define arch_local_irq_disable arch_local_irq_disable static inline void arch_local_irq_disable(void) { unsigned long temp; @@ -135,6 +141,7 @@ static inline void arch_local_irq_disable(void) /* * Save the current interrupt enable state. 
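To illustrate the new table, a hedged sketch of a driver mapping registers as Device memory and a buffer as Normal non-cacheable memory (the device, offsets and sizes are hypothetical):

	#include <linux/io.h>
	#include <linux/sizes.h>

	/* Registers want ioremap() (Device memory: no speculation, ordered
	 * accesses); a framebuffer-like region wants ioremap_wc() (Normal
	 * memory: write merging and unaligned accesses are fine). */
	static int foo_probe_mappings(phys_addr_t reg_base, phys_addr_t buf_base)
	{
		void __iomem *regs = ioremap(reg_base, SZ_4K);
		void __iomem *buf  = ioremap_wc(buf_base, SZ_1M);

		if (!regs || !buf)
			goto err;

		writel(1, regs + 0x10);		/* ordered MMIO write, offset hypothetical */
		memset_io(buf, 0, SZ_1M);	/* now backed by mmioset() on LE kernels */
		return 0;
	err:
		if (regs)
			iounmap(regs);
		if (buf)
			iounmap(buf);
		return -ENOMEM;
	}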
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 3b763d6652a0..43908146a5cf 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -20,6 +20,7 @@

 #if __LINUX_ARM_ARCH__ >= 6

+#define arch_local_irq_save arch_local_irq_save
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
@@ -31,6 +32,7 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }

+#define arch_local_irq_enable arch_local_irq_enable
 static inline void arch_local_irq_enable(void)
 {
 	asm volatile(
@@ -40,6 +42,7 @@ static inline void arch_local_irq_enable(void)
 		: "memory", "cc");
 }

+#define arch_local_irq_disable arch_local_irq_disable
 static inline void arch_local_irq_disable(void)
 {
 	asm volatile(
@@ -56,6 +59,7 @@ static inline void arch_local_irq_disable(void)
 /*
  * Save the current interrupt enable state & disable IRQs
  */
+#define arch_local_irq_save arch_local_irq_save
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags, temp;
@@ -73,6 +77,7 @@ static inline unsigned long arch_local_irq_save(void)
 /*
  * Enable IRQs
  */
+#define arch_local_irq_enable arch_local_irq_enable
 static inline void arch_local_irq_enable(void)
 {
 	unsigned long temp;
@@ -88,6 +93,7 @@ static inline void arch_local_irq_enable(void)
 /*
  * Disable IRQs
  */
+#define arch_local_irq_disable arch_local_irq_disable
 static inline void arch_local_irq_disable(void)
 {
 	unsigned long temp;
@@ -135,6 +141,7 @@ static inline void arch_local_irq_disable(void)
 /*
  * Save the current interrupt enable state.
  */
+#define arch_local_save_flags arch_local_save_flags
 static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
@@ -147,6 +154,7 @@ static inline unsigned long arch_local_save_flags(void)
 /*
  * restore saved IRQ & FIQ state
  */
+#define arch_local_irq_restore arch_local_irq_restore
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	asm volatile(
@@ -156,10 +164,13 @@ static inline void arch_local_irq_restore(unsigned long flags)
 		: "memory", "cc");
 }

+#define arch_irqs_disabled_flags arch_irqs_disabled_flags
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
 	return flags & IRQMASK_I_BIT;
 }

+#include <asm-generic/irqflags.h>
+
 #endif /* ifdef __KERNEL__ */
 #endif /* ifndef __ASM_ARM_IRQFLAGS_H */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 25410b2d8bc1..194c91b610ff 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -23,7 +23,7 @@
 #define c0_MPIDR	1	/* MultiProcessor ID Register */
 #define c0_CSSELR	2	/* Cache Size Selection Register */
 #define c1_SCTLR	3	/* System Control Register */
-#define c1_ACTLR	4	/* Auxilliary Control Register */
+#define c1_ACTLR	4	/* Auxiliary Control Register */
 #define c1_CPACR	5	/* Coprocessor Access Control */
 #define c2_TTBR0	6	/* Translation Table Base Register 0 */
 #define c2_TTBR0_high	7	/* TTBR0 top 32 bits */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d71607c16601..e896d2c196e6 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -218,11 +218,6 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 	return 0;
 }

-static inline void vgic_arch_setup(const struct vgic_params *vgic)
-{
-	BUG_ON(vgic->type != VGIC_V2);
-}
-
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 0406cb3f1af7..cb3a40717edd 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -51,7 +51,7 @@ struct machine_desc {
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **);
 	void			(*dt_fixup)(void);
-	void			(*init_meminfo)(void);
+	long long		(*pv_fixup)(void);
 	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
 	void			(*init_early)(void);
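The repeated `#define arch_local_irq_save arch_local_irq_save` lines in the irqflags.h hunk look odd in isolation; they are how <asm-generic/irqflags.h> (now included at the bottom) detects which operations the architecture provides. A hedged, paraphrased sketch of the generic side's pattern:

	/*
	 * Sketch of the asm-generic convention (paraphrased, not from this
	 * patch): an arch claims an op by defining the symbol to itself;
	 * the generic header only fills in what was left unclaimed.
	 */
	#ifndef arch_local_irq_save
	static inline unsigned long arch_local_irq_save(void)
	{
		unsigned long flags = arch_local_save_flags();

		arch_local_irq_disable();	/* fallback: compose from simpler ops */
		return flags;
	}
	#endif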
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 50b378f59e08..acd4983d9b1f 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -137,17 +137,12 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
 /**
  * mcpm_cpu_suspend - bring the calling CPU in a suspended state
  *
- * @expected_residency: duration in microseconds the CPU is expected
- *			to remain suspended, or 0 if unknown/infinity.
- *
- * The calling CPU is suspended.  The expected residency argument is used
- * as a hint by the platform specific backend to implement the appropriate
- * sleep state level according to the knowledge it has on wake-up latency
- * for the given hardware.
+ * The calling CPU is suspended.  This is similar to mcpm_cpu_power_down()
+ * except for possible extra platform specific configuration steps to allow
+ * an asynchronous wake-up e.g. with a pending interrupt.
  *
  * If this CPU is found to be the "last man standing" in the cluster
- * then the cluster may be prepared for power-down too, if the expected
- * residency makes it worthwhile.
+ * then the cluster may be prepared for power-down too.
  *
  * This must be called with interrupts disabled.
  *
@@ -157,7 +152,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
  * This will return if mcpm_platform_register() has not been called
  * previously in which case the caller should take appropriate action.
  */
-void mcpm_cpu_suspend(u64 expected_residency);
+void mcpm_cpu_suspend(void);

 /**
  * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
@@ -234,12 +229,6 @@ struct mcpm_platform_ops {
 	void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
 	void (*cluster_is_up)(unsigned int cluster);
 	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
-
-	/* deprecated callbacks */
-	int (*power_up)(unsigned int cpu, unsigned int cluster);
-	void (*power_down)(void);
-	void (*suspend)(u64);
-	void (*powered_up)(void);
 };

 /**
@@ -251,35 +240,6 @@ struct mcpm_platform_ops {
  */
 int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);

-/* Synchronisation structures for coordinating safe cluster setup/teardown: */
-
-/*
- * When modifying this structure, make sure you update the MCPM_SYNC_ defines
- * to match.
- */
-struct mcpm_sync_struct {
-	/* individual CPU states */
-	struct {
-		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
-	} cpus[MAX_CPUS_PER_CLUSTER];
-
-	/* cluster state */
-	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
-
-	/* inbound-side state */
-	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
-};
-
-struct sync_struct {
-	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
-};
-
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
-bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
-int __mcpm_cluster_state(unsigned int cluster);
-
 /**
  * mcpm_sync_init - Initialize the cluster synchronization support
  *
@@ -318,6 +278,29 @@ int __init mcpm_loopback(void (*cache_disable)(void));

 void __init mcpm_smp_set_ops(void);

+/*
+ * Synchronisation structures for coordinating safe cluster setup/teardown.
+ * This is private to the MCPM core code and shared between C and assembly.
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+	/* individual CPU states */
+	struct {
+		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+	} cpus[MAX_CPUS_PER_CLUSTER];
+
+	/* cluster state */
+	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+	/* inbound-side state */
+	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
 #else

 /*
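A hedged before/after sketch of a caller adapting to the new mcpm_cpu_suspend() prototype (the cpuidle context is hypothetical):

	/* Hypothetical cpuidle backend fragment.  The residency hint is
	 * gone; platform backends now decide sleep depth themselves. */
	static int foo_cpuidle_enter_deep(void)
	{
		int ret = cpu_pm_enter();	/* run CPU PM notifiers */
		if (ret)
			return ret;
		/* old API: mcpm_cpu_suspend(5000);  (expected residency in us) */
		mcpm_cpu_suspend();		/* may take the cluster down if last man */
		cpu_pm_exit();
		return 0;
	}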
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 184def0e1652..b7f6fb462ea0 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -18,8 +18,6 @@
 #include <linux/types.h>
 #include <linux/sizes.h>

-#include <asm/cache.h>
-
 #ifdef CONFIG_NEED_MACH_MEMORY_H
 #include <mach/memory.h>
 #endif
@@ -133,20 +131,6 @@
 #define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

 /*
- * Minimum guaranted alignment in pgd_alloc().  The page table pointers passed
- * around in head.S and proc-*.S are shifted by this amount, in order to
- * leave spare high bits for systems with physical address extension.  This
- * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but
- * gives us about 38-bits or so.
- */
-#ifdef CONFIG_ARM_LPAE
-#define ARCH_PGD_SHIFT		L1_CACHE_SHIFT
-#else
-#define ARCH_PGD_SHIFT		0
-#endif
-#define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
-
-/*
  * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
  * memory.  This is used for XIP and NoMMU kernels, and on platforms that don't
  * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
@@ -291,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  */
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
-#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)

 extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);

@@ -302,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
  */
 static inline phys_addr_t __virt_to_idmap(unsigned long x)
 {
-	if (arch_virt_to_idmap)
+	if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
 		return arch_virt_to_idmap(x);
 	else
 		return __virt_to_phys(x);
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index ed690c49ef93..e358b7966c06 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -16,11 +16,21 @@ enum {
 	ARM_SEC_UNLIKELY,
 	ARM_SEC_MAX,
 };
+#endif

 struct mod_arch_specific {
+#ifdef CONFIG_ARM_UNWIND
 	struct unwind_table *unwind[ARM_SEC_MAX];
-};
 #endif
+#ifdef CONFIG_ARM_MODULE_PLTS
+	struct elf32_shdr   *core_plt;
+	struct elf32_shdr   *init_plt;
+	int		    core_plt_count;
+	int		    init_plt_count;
+#endif
+};
+
+u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);

 /*
  * Add the ARM architecture version to the version magic string
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 585dc33a7a24..a5635444ca41 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -31,16 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
  */
 #define PCI_DMA_BUS_IS_PHYS     (1)

-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-					enum pci_dma_burst_strategy *strat,
-					unsigned long *strategy_parameter)
-{
-	*strat = PCI_DMA_BURST_INFINITY;
-	*strategy_parameter = ~0UL;
-}
-#endif
-
 #define HAVE_PCI_MMAP
 extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	enum pci_mmap_state mmap_state, int write_combine);
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index d9cf138fd7d4..4f9dec489931 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -19,4 +19,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs)	perf_misc_flags(regs)
 #endif

+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+	(regs)->ARM_pc = (__ip); \
+	(regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
+	(regs)->ARM_sp = current_stack_pointer; \
+	(regs)->ARM_cpsr = SVC_MODE; \
+}
+
 #endif /* __ARM_PERF_EVENT_H__ */
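The pfn_to_kaddr() cast in the memory.h hunk is a real correctness fix on LPAE; a hedged arithmetic sketch of the failure mode it closes:

	/*
	 * Hedged sketch (not from the patch): with a 32-bit unsigned long
	 * and PAGE_SHIFT == 12, any PFN >= (1UL << 20) wraps when shifted.
	 */
	unsigned long pfn = 0x123456;		/* page at phys 0x1_2345_6000 */

	/* old: (pfn << PAGE_SHIFT)  == 0x23456000  -- top bits silently lost */
	/* new: ((phys_addr_t)pfn << PAGE_SHIFT) == 0x123456000 -- 64-bit, ok */
	void *va = __va((phys_addr_t)pfn << PAGE_SHIFT);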
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index bfd662e49a25..aeddd28b3595 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -129,7 +129,36 @@
 /*
  * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+ * ARMv6+ without TEX remapping, they are a table index.
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type		Pre-ARMv6	ARMv6+ type / cacheable status
+ * UNCACHED		Uncached	Strongly ordered
+ * BUFFERABLE		Bufferable	Normal memory / non-cacheable
+ * WRITETHROUGH		Writethrough	Normal memory / write through
+ * WRITEBACK		Writeback	Normal memory / write back, read alloc
+ * MINICACHE		Minicache	N/A
+ * WRITEALLOC		Writeback	Normal memory / write back, write alloc
+ * DEV_SHARED		Uncached	Device memory (shared)
+ * DEV_NONSHARED	Uncached	Device memory (non-shared)
+ * DEV_WC		Bufferable	Normal memory / non-cacheable
+ * DEV_CACHED		Writeback	Normal memory / write back, read alloc
+ * VECTORS		Variable	Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
  */
 #define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
 #define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
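A hedged sketch of how these memory-type values are consumed as a PTE field (the MT bits sit at [5:2]; the helper below and its use of the header's L_PTE_MT_MASK are illustrative, not from this patch):

	/* Hypothetical helper: classify a PTE by its memory type. */
	static inline bool pte_is_device(pte_t pte)
	{
		pteval_t mt = pte_val(pte) & L_PTE_MT_MASK;	/* bits [5:2] */

		return mt == L_PTE_MT_DEV_SHARED || mt == L_PTE_MT_DEV_NONSHARED;
	}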
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 675e4ab79f68..3fc87dfd77e6 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -24,22 +24,10 @@
  *	interrupt and passed the address of the low level handler,
  *	and can be used to implement any platform specific handling
  *	before or after calling it.
- * @runtime_resume: an optional handler which will be called by the
- *	runtime PM framework following a call to pm_runtime_get().
- *	Note that if pm_runtime_get() is called more than once in
- *	succession this handler will only be called once.
- * @runtime_suspend: an optional handler which will be called by the
- *	runtime PM framework following a call to pm_runtime_put().
- *	Note that if pm_runtime_get() is called more than once in
- *	succession this handler will only be called following the
- *	final call to pm_runtime_put() that actually disables the
- *	hardware.
  */
 struct arm_pmu_platdata {
 	irqreturn_t (*handle_irq)(int irq, void *dev,
 				  irq_handler_t pmu_handler);
-	int (*runtime_resume)(struct device *dev);
-	int (*runtime_suspend)(struct device *dev);
 };

 #ifdef CONFIG_HW_PERF_EVENTS
@@ -92,6 +80,7 @@ struct pmu_hw_events {
 struct arm_pmu {
 	struct pmu	pmu;
 	cpumask_t	active_irqs;
+	cpumask_t	supported_cpus;
 	int		*irq_affinity;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
@@ -122,8 +111,6 @@ struct arm_pmu {

 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

-extern const struct dev_pm_ops armpmu_dev_pm_ops;
-
 int armpmu_register(struct arm_pmu *armpmu, int type);

 u64 armpmu_event_update(struct perf_event *event);
@@ -158,6 +145,10 @@ struct pmu_probe_info {
 #define XSCALE_PMU_PROBE(_version, _fn) \
 	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)

+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table);
+
 #endif /* CONFIG_HW_PERF_EVENTS */

 #endif /* __ARM_PMU_H__ */
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 5324c1112f3a..8877ad5ffe10 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -125,13 +125,6 @@ extern void cpu_resume(void);
 		ttbr;					\
 	})

-#define cpu_set_ttbr(nr, val)				\
-	do {						\
-		u64 ttbr = val;				\
-		__asm__("mcrr	p15, " #nr ", %Q0, %R0, c2"	\
-			: : "r" (ttbr));		\
-	} while (0)
-
 #define cpu_get_pgd()	\
 	({						\
 		u64 pg = cpu_get_ttbr(0);		\
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 18f5a554134f..2f3ac1ba6fb4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -61,7 +61,7 @@ asmlinkage void secondary_start_kernel(void);
 struct secondary_data {
 	union {
 		unsigned long mpu_rgn_szr;
-		unsigned long pgdir;
+		u64 pgdir;
 	};
 	unsigned long swapper_pg_dir;
 	void *stack;
@@ -69,6 +69,7 @@ struct secondary_data {
 extern struct secondary_data secondary_data;
 extern volatile int pen_release;
 extern void secondary_startup(void);
+extern void secondary_startup_arm(void);

 extern int __cpu_disable(void);
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
index cd20029bcd94..6c7182f32cef 100644
--- a/arch/arm/include/asm/suspend.h
+++ b/arch/arm/include/asm/suspend.h
@@ -7,6 +7,7 @@ struct sleep_save_sp {
 };

 extern void cpu_resume(void);
+extern void cpu_resume_arm(void);
 extern int cpu_suspend(unsigned long, int (*)(unsigned long));

 #endif
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
index 720ea0320a6d..3860cbd401ec 100644
--- a/arch/arm/include/asm/system_info.h
+++ b/arch/arm/include/asm/system_info.h
@@ -17,6 +17,7 @@

 /* information about the system we're running on */
 extern unsigned int system_rev;
+extern const char *system_serial;
 extern unsigned int system_serial_low;
 extern unsigned int system_serial_high;
 extern unsigned int mem_fclk_21285;
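Tying the pmu.h hunk together, a hedged sketch of a PMU platform driver built on the new arm_pmu_device_probe() entry point (driver, table and init-function names are all hypothetical):

	/* Hypothetical platform driver using the shared probe helper. */
	static int foo_pmu_init(struct arm_pmu *pmu)
	{
		pmu->name = "foo_pmu";
		return 0;
	}

	static const struct of_device_id foo_pmu_of_ids[] = {
		{ .compatible = "foo,foo-pmu", .data = foo_pmu_init },
		{ /* sentinel */ },
	};

	static int foo_pmu_probe(struct platform_device *pdev)
	{
		/* probe_table == NULL: rely on DT matching alone in this sketch */
		return arm_pmu_device_probe(pdev, foo_pmu_of_ids, NULL);
	}

	static struct platform_driver foo_pmu_driver = {
		.driver = {
			.name		= "foo-pmu",
			.of_match_table	= foo_pmu_of_ids,
		},
		.probe = foo_pmu_probe,
	};
	module_platform_driver(foo_pmu_driver);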
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 2fe85fff5cca..370f7a732900 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -18,7 +18,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 #define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
 #define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)

 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index 200f9a7cd623..a91ae499614c 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -45,7 +45,6 @@
 #define THUMB(x...)	x
 #ifdef __ASSEMBLY__
 #define W(instr)	instr.w
-#define BSYM(sym)	sym + 1
 #else
 #define WASM(instr)	#instr ".w"
 #endif
@@ -59,7 +58,6 @@
 #define THUMB(x...)
 #ifdef __ASSEMBLY__
 #define W(instr)	instr
-#define BSYM(sym)	sym
 #else
 #define WASM(instr)	#instr
 #endif
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index ee5f3084243c..22e414056a8c 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -5,6 +5,9 @@
  * First, the standard VFP set.
  */

+#ifndef __ASM_VFP_H
+#define __ASM_VFP_H
+
 #define FPSID			cr0
 #define FPSCR			cr1
 #define MVFR1			cr6
@@ -87,3 +90,9 @@
 #define VFPOPDESC_UNUSED_BIT	(24)
 #define VFPOPDESC_UNUSED_MASK	(0xFF << VFPOPDESC_UNUSED_BIT)
 #define VFPOPDESC_OPDESC_MASK	(~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK))
+
+#ifndef __ASSEMBLY__
+void vfp_disable(void);
+#endif
+
+#endif /* __ASM_VFP_H */
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 1317ee40f4df..04ff8e7b37df 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_ARM_XEN_HYPERVISOR_H
 #define _ASM_ARM_XEN_HYPERVISOR_H

+#include <linux/init.h>
+
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;

@@ -18,4 +20,10 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)

 extern struct dma_map_ops *xen_dma_ops;

+#ifdef CONFIG_XEN
+void __init xen_early_init(void);
+#else
+static inline void xen_early_init(void) { return; }
+#endif
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 0b579b2f4e0e..1bee8ca12494 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -12,7 +12,6 @@
 #include <xen/interface/grant_table.h>

 #define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_virt(m)			(__va(mfn_to_pfn(m) << PAGE_SHIFT))

 #define pte_mfn	    pte_pfn
 #define mfn_pte	    pfn_pte
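Finally, a hedged usage sketch of the renamed topology accessor (the loop context is hypothetical; on ARM the macro still resolves to the thread_sibling mask, as the unchanged right-hand side shows):

	/* Iterate the SMT siblings of each online CPU. */
	int cpu, sibling;

	for_each_online_cpu(cpu)
		for_each_cpu(sibling, topology_sibling_cpumask(cpu))
			pr_info("cpu%d: sibling cpu%d\n", cpu, sibling);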