Diffstat (limited to 'arch/arm/include')
23 files changed, 477 insertions, 303 deletions
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index cd4458f64171..cb47d28cbe1f 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -8,6 +8,7 @@ #define CPUID_CACHETYPE 1 #define CPUID_TCM 2 #define CPUID_TLBTYPE 3 +#define CPUID_MPIDR 5 #define CPUID_EXT_PFR0 "c1, 0" #define CPUID_EXT_PFR1 "c1, 1" @@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void) return read_cpuid(CPUID_TCM); } +static inline unsigned int __attribute_const__ read_cpuid_mpidr(void) +{ + return read_cpuid(CPUID_MPIDR); +} + /* * Intel's XScale3 core supports some v6 features (supersections, L2) * but advertises itself as v5 as it does not support the v6 ISA. For diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 7a21d0bf7134..28b7ee8d7398 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -32,7 +32,7 @@ static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr) static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) { - return (void *)__bus_to_virt(addr); + return (void *)__bus_to_virt((unsigned long)addr); } static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h new file mode 100644 index 000000000000..5abaf5bbd985 --- /dev/null +++ b/arch/arm/include/asm/exception.h @@ -0,0 +1,19 @@ +/* + * Annotations for marking C functions as exception handlers. + * + * These should only be used for C functions that are called from the low + * level exception entry code and not any intervening C code. + */ +#ifndef __ASM_ARM_EXCEPTION_H +#define __ASM_ARM_EXCEPTION_H + +#include <linux/ftrace.h> + +#define __exception __attribute__((section(".exception.text"))) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#define __exception_irq_entry __irq_entry +#else +#define __exception_irq_entry __exception +#endif + +#endif /* __ASM_ARM_EXCEPTION_H */ diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 8c73900da9ed..253cc86318bf 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -25,17 +25,17 @@ #ifdef CONFIG_SMP -#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ smp_mb(); \ __asm__ __volatile__( \ - "1: ldrex %1, [%2]\n" \ + "1: ldrex %1, [%3]\n" \ " " insn "\n" \ - "2: strex %1, %0, [%2]\n" \ - " teq %1, #0\n" \ + "2: strex %2, %0, [%3]\n" \ + " teq %2, #0\n" \ " bne 1b\n" \ " mov %0, #0\n" \ - __futex_atomic_ex_table("%4") \ - : "=&r" (ret), "=&r" (oldval) \ + __futex_atomic_ex_table("%5") \ + : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ : "cc", "memory") @@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, #include <linux/preempt.h> #include <asm/domain.h> -#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ __asm__ __volatile__( \ - "1: " T(ldr) " %1, [%2]\n" \ + "1: " T(ldr) " %1, [%3]\n" \ " " insn "\n" \ - "2: " T(str) " %0, [%2]\n" \ + "2: " T(str) " %0, [%3]\n" \ " mov %0, #0\n" \ - __futex_atomic_ex_table("%4") \ - : "=&r" (ret), "=&r" (oldval) \ + __futex_atomic_ex_table("%5") \ + : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ : "cc", "memory") @@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user 
*uaddr) int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; int cmparg = (encoded_op << 20) >> 20; - int oldval = 0, ret; + int oldval = 0, ret, tmp; if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; @@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) switch (op) { case FUTEX_OP_SET: - __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg); + __futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg); break; case FUTEX_OP_ADD: - __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg); + __futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg); break; case FUTEX_OP_OR: - __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg); + __futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg); break; case FUTEX_OP_ANDN: - __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg); + __futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg); break; case FUTEX_OP_XOR: - __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg); + __futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg); break; default: ret = -ENOSYS; diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 16bd48031583..434edccdf7f3 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -45,8 +45,15 @@ #define L2X0_CLEAN_INV_LINE_PA 0x7F0 #define L2X0_CLEAN_INV_LINE_IDX 0x7F8 #define L2X0_CLEAN_INV_WAY 0x7FC -#define L2X0_LOCKDOWN_WAY_D 0x900 -#define L2X0_LOCKDOWN_WAY_I 0x904 +/* + * The lockdown registers repeat 8 times for L310, the L210 has only one + * D and one I lockdown register at 0x0900 and 0x0904. + */ +#define L2X0_LOCKDOWN_WAY_D_BASE 0x900 +#define L2X0_LOCKDOWN_WAY_I_BASE 0x904 +#define L2X0_LOCKDOWN_STRIDE 0x08 +#define L2X0_ADDR_FILTER_START 0xC00 +#define L2X0_ADDR_FILTER_END 0xC04 #define L2X0_TEST_OPERATION 0xF00 #define L2X0_LINE_DATA 0xF10 #define L2X0_LINE_TAG 0xF30 @@ -60,11 +67,26 @@ #define L2X0_CACHE_ID_PART_MASK (0xf << 6) #define L2X0_CACHE_ID_PART_L210 (1 << 6) #define L2X0_CACHE_ID_PART_L310 (3 << 6) +#define L2X0_CACHE_ID_RTL_MASK 0x3f +#define L2X0_CACHE_ID_RTL_R0P0 0x0 +#define L2X0_CACHE_ID_RTL_R1P0 0x2 +#define L2X0_CACHE_ID_RTL_R2P0 0x4 +#define L2X0_CACHE_ID_RTL_R3P0 0x5 +#define L2X0_CACHE_ID_RTL_R3P1 0x6 +#define L2X0_CACHE_ID_RTL_R3P2 0x8 #define L2X0_AUX_CTRL_MASK 0xc0000fff +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0 +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7 +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3 +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3) +#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6 +#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6) +#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9 +#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9) #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17 -#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17) +#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17) #define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22 #define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26 #define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27 @@ -72,8 +94,33 @@ #define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29 #define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30 +#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0 +#define L2X0_LATENCY_CTRL_RD_SHIFT 4 +#define L2X0_LATENCY_CTRL_WR_SHIFT 8 + +#define L2X0_ADDR_FILTER_EN 1 + #ifndef __ASSEMBLY__ extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); +extern int l2x0_of_init(__u32 
aux_val, __u32 aux_mask); + +struct l2x0_regs { + unsigned long phy_base; + unsigned long aux_ctrl; + /* + * Whether the following registers need to be saved/restored + * depends on platform + */ + unsigned long tag_latency; + unsigned long data_latency; + unsigned long filter_start; + unsigned long filter_end; + unsigned long prefetch_ctrl; + unsigned long pwr_ctrl; +}; + +extern struct l2x0_regs l2x0_saved_regs; + #endif #endif diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index ffb089d46a17..dc2d5102e680 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -281,10 +281,16 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; }) #define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; }) +#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) +#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) + #define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); }) #define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); }) #define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); }) +#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); }) +#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); }) + #define ioread8_rep(p,d,c) __raw_readsb(p,d,c) #define ioread16_rep(p,d,c) __raw_readsw(p,d,c) #define ioread32_rep(p,d,c) __raw_readsl(p,d,c) diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h index 080d74f8128d..6fd955d34c65 100644 --- a/arch/arm/include/asm/localtimer.h +++ b/arch/arm/include/asm/localtimer.h @@ -10,6 +10,8 @@ #ifndef __ASM_ARM_LOCALTIMER_H #define __ASM_ARM_LOCALTIMER_H +#include <linux/errno.h> + struct clock_event_device; /* @@ -22,6 +24,10 @@ void percpu_timer_setup(void); */ asmlinkage void do_local_timer(struct pt_regs *); +/* + * Called from C code + */ +void handle_local_timer(struct pt_regs *); #ifdef CONFIG_LOCAL_TIMERS diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index b8de516e600e..441fc4fe8263 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -160,7 +160,6 @@ * so that all we need to do is modify the 8-bit constant field. 
*/ #define __PV_BITS_31_24 0x81000000 -#define __PV_BITS_23_16 0x00810000 extern unsigned long __pv_phys_offset; #define PHYS_OFFSET __pv_phys_offset @@ -178,9 +177,6 @@ static inline unsigned long __virt_to_phys(unsigned long x) { unsigned long t; __pv_stub(x, t, "add", __PV_BITS_31_24); -#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT - __pv_stub(t, t, "add", __PV_BITS_23_16); -#endif return t; } @@ -188,9 +184,6 @@ static inline unsigned long __phys_to_virt(unsigned long x) { unsigned long t; __pv_stub(x, t, "sub", __PV_BITS_31_24); -#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT - __pv_stub(t, t, "sub", __PV_BITS_23_16); -#endif return t; } #else diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 543b44916d2c..6c6809f982f1 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -31,11 +31,7 @@ struct mod_arch_specific { /* Add __virt_to_phys patching state as well */ #ifdef CONFIG_ARM_PATCH_PHYS_VIRT -#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT -#define MODULE_ARCH_VERMAGIC_P2V "p2v16 " -#else #define MODULE_ARCH_VERMAGIC_P2V "p2v8 " -#endif #else #define MODULE_ARCH_VERMAGIC_P2V "" #endif diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index d8387437ec5a..53426c66352a 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -34,6 +34,7 @@ struct outer_cache_fns { void (*sync)(void); #endif void (*set_debug)(unsigned long); + void (*resume)(void); }; #ifdef CONFIG_OUTER_CACHE @@ -74,6 +75,12 @@ static inline void outer_disable(void) outer_cache.disable(); } +static inline void outer_resume(void) +{ + if (outer_cache.resume) + outer_cache.resume(); +} + #else static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index ac75d0848889..ca94653f1ecb 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -151,47 +151,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); -typedef unsigned long pteval_t; - -#undef STRICT_MM_TYPECHECKS - -#ifdef STRICT_MM_TYPECHECKS -/* - * These are used to make use of C type-checking.. - */ -typedef struct { pteval_t pte; } pte_t; -typedef struct { unsigned long pmd; } pmd_t; -typedef struct { unsigned long pgd[2]; } pgd_t; -typedef struct { unsigned long pgprot; } pgprot_t; - -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((x).pmd) -#define pgd_val(x) ((x).pgd[0]) -#define pgprot_val(x) ((x).pgprot) - -#define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -#else -/* - * .. 
while these make it easier on the compiler - */ -typedef pteval_t pte_t; -typedef unsigned long pmd_t; -typedef unsigned long pgd_t[2]; -typedef unsigned long pgprot_t; - -#define pte_val(x) (x) -#define pmd_val(x) (x) -#define pgd_val(x) ((x)[0]) -#define pgprot_val(x) (x) - -#define __pte(x) (x) -#define __pmd(x) (x) -#define __pgprot(x) (x) - -#endif /* STRICT_MM_TYPECHECKS */ +#include <asm/pgtable-2level-types.h> #endif /* CONFIG_MMU */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 22de005f159c..3e08fd3fbb6b 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -105,9 +105,9 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) } static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, - unsigned long prot) + pmdval_t prot) { - unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot; + pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot; pmdp[0] = __pmd(pmdval); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); flush_pmd_entry(pmdp); diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h new file mode 100644 index 000000000000..5cfba15cb401 --- /dev/null +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h @@ -0,0 +1,93 @@ +/* + * arch/arm/include/asm/pgtable-2level-hwdef.h + * + * Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H +#define _ASM_PGTABLE_2LEVEL_HWDEF_H + +/* + * Hardware page table definitions. + * + * + Level 1 descriptor (PMD) + * - common + */ +#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) +#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) +#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) +#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) +#define PMD_BIT4 (_AT(pmdval_t, 1) << 4) +#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) +#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ +/* + * - section + */ +#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) +#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) +#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ +#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 1) << 10) +#define PMD_SECT_AP_READ (_AT(pmdval_t, 1) << 11) +#define PMD_SECT_TEX(x) (_AT(pmdval_t, (x)) << 12) /* v5 */ +#define PMD_SECT_APX (_AT(pmdval_t, 1) << 15) /* v6 */ +#define PMD_SECT_S (_AT(pmdval_t, 1) << 16) /* v6 */ +#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */ +#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */ +#define PMD_SECT_AF (_AT(pmdval_t, 0)) + +#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0)) +#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) +#define PMD_SECT_WT (PMD_SECT_CACHEABLE) +#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) +#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) +#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) +#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2)) + +/* + * - coarse table (not used) + */ + +/* + * + Level 2 descriptor (PTE) + * - common + */ +#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) +#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) +#define PTE_TYPE_LARGE (_AT(pteval_t, 1) << 0) +#define PTE_TYPE_SMALL (_AT(pteval_t, 2) << 0) +#define PTE_TYPE_EXT (_AT(pteval_t, 3) << 0) /* v5 */ +#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) +#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) + +/* + * - extended small 
page/tiny page + */ +#define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */ +#define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4) +#define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4) +#define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4) +#define PTE_EXT_AP_UNO_SRO (_AT(pteval_t, 0) << 4) +#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0) +#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1) +#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0) +#define PTE_EXT_TEX(x) (_AT(pteval_t, (x)) << 6) /* v5 */ +#define PTE_EXT_APX (_AT(pteval_t, 1) << 9) /* v6 */ +#define PTE_EXT_COHERENT (_AT(pteval_t, 1) << 9) /* XScale3 */ +#define PTE_EXT_SHARED (_AT(pteval_t, 1) << 10) /* v6 */ +#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* v6 */ + +/* + * - small page + */ +#define PTE_SMALL_AP_MASK (_AT(pteval_t, 0xff) << 4) +#define PTE_SMALL_AP_UNO_SRO (_AT(pteval_t, 0x00) << 4) +#define PTE_SMALL_AP_UNO_SRW (_AT(pteval_t, 0x55) << 4) +#define PTE_SMALL_AP_URO_SRW (_AT(pteval_t, 0xaa) << 4) +#define PTE_SMALL_AP_URW_SRW (_AT(pteval_t, 0xff) << 4) + +#define PHYS_MASK (~0UL) + +#endif diff --git a/arch/arm/include/asm/pgtable-2level-types.h b/arch/arm/include/asm/pgtable-2level-types.h new file mode 100644 index 000000000000..66cb5b0e89c5 --- /dev/null +++ b/arch/arm/include/asm/pgtable-2level-types.h @@ -0,0 +1,67 @@ +/* + * arch/arm/include/asm/pgtable-2level-types.h + * + * Copyright (C) 1995-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H +#define _ASM_PGTABLE_2LEVEL_TYPES_H + +#include <asm/types.h> + +typedef u32 pteval_t; +typedef u32 pmdval_t; + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS +/* + * These are used to make use of C type-checking.. + */ +typedef struct { pteval_t pte; } pte_t; +typedef struct { pmdval_t pmd; } pmd_t; +typedef struct { pmdval_t pgd[2]; } pgd_t; +typedef struct { pteval_t pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd[0]) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#else +/* + * .. 
while these make it easier on the compiler + */ +typedef pteval_t pte_t; +typedef pmdval_t pmd_t; +typedef pmdval_t pgd_t[2]; +typedef pteval_t pgprot_t; + +#define pte_val(x) (x) +#define pmd_val(x) (x) +#define pgd_val(x) ((x)[0]) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pmd(x) (x) +#define __pgprot(x) (x) + +#endif /* STRICT_MM_TYPECHECKS */ + +#endif /* _ASM_PGTABLE_2LEVEL_TYPES_H */ diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h new file mode 100644 index 000000000000..470457e1cfc5 --- /dev/null +++ b/arch/arm/include/asm/pgtable-2level.h @@ -0,0 +1,143 @@ +/* + * arch/arm/include/asm/pgtable-2level.h + * + * Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_PGTABLE_2LEVEL_H +#define _ASM_PGTABLE_2LEVEL_H + +/* + * Hardware-wise, we have a two level page table structure, where the first + * level has 4096 entries, and the second level has 256 entries. Each entry + * is one 32-bit word. Most of the bits in the second level entry are used + * by hardware, and there aren't any "accessed" and "dirty" bits. + * + * Linux on the other hand has a three level page table structure, which can + * be wrapped to fit a two level page table structure easily - using the PGD + * and PTE only. However, Linux also expects one "PTE" table per page, and + * at least a "dirty" bit. + * + * Therefore, we tweak the implementation slightly - we tell Linux that we + * have 2048 entries in the first level, each of which is 8 bytes (iow, two + * hardware pointers to the second level.) The second level contains two + * hardware PTE tables arranged contiguously, preceded by Linux versions + * which contain the state information Linux needs. We, therefore, end up + * with 512 entries in the "PTE" level. + * + * This leads to the page tables having the following layout: + * + * pgd pte + * | | + * +--------+ + * | | +------------+ +0 + * +- - - - + | Linux pt 0 | + * | | +------------+ +1024 + * +--------+ +0 | Linux pt 1 | + * | |-----> +------------+ +2048 + * +- - - - + +4 | h/w pt 0 | + * | |-----> +------------+ +3072 + * +--------+ +8 | h/w pt 1 | + * | | +------------+ +4096 + * + * See L_PTE_xxx below for definitions of bits in the "Linux pt", and + * PTE_xxx for definitions of bits appearing in the "h/w pt". + * + * PMD_xxx definitions refer to bits in the first level page table. + * + * The "dirty" bit is emulated by only granting hardware write permission + * iff the page is marked "writable" and "dirty" in the Linux PTE. This + * means that a write to a clean page will cause a permission fault, and + * the Linux MM layer will mark the page dirty via handle_pte_fault(). + * For the hardware to notice the permission change, the TLB entry must + * be flushed, and ptep_set_access_flags() does that for us. + * + * The "accessed" or "young" bit is emulated by a similar method; we only + * allow accesses to the page if the "young" bit is set. Accesses to the + * page will cause a fault, and handle_pte_fault() will set the young bit + * for us as long as the page is marked present in the corresponding Linux + * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is + * up to date. + * + * However, when the "young" bit is cleared, we deny access to the page + * by clearing the hardware PTE. 
Currently Linux does not flush the TLB + * for us in this case, which means the TLB will retain the transation + * until either the TLB entry is evicted under pressure, or a context + * switch which changes the user space mapping occurs. + */ +#define PTRS_PER_PTE 512 +#define PTRS_PER_PMD 1 +#define PTRS_PER_PGD 2048 + +#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) +#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) +#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) + +/* + * PMD_SHIFT determines the size of the area a second-level page table can map + * PGDIR_SHIFT determines what a third-level page table entry can map + */ +#define PMD_SHIFT 21 +#define PGDIR_SHIFT 21 + +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * section address mask and size definitions. + */ +#define SECTION_SHIFT 20 +#define SECTION_SIZE (1UL << SECTION_SHIFT) +#define SECTION_MASK (~(SECTION_SIZE-1)) + +/* + * ARMv6 supersection address mask and size definitions. + */ +#define SUPERSECTION_SHIFT 24 +#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT) +#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1)) + +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) + +/* + * "Linux" PTE definitions. + * + * We keep two sets of PTEs - the hardware and the linux version. + * This allows greater flexibility in the way we map the Linux bits + * onto the hardware tables, and allows us to have YOUNG and DIRTY + * bits. + * + * The PTE table pointer refers to the hardware entries; the "Linux" + * entries are stored 1024 bytes below. + */ +#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) +#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) +#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ +#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) +#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) +#define L_PTE_USER (_AT(pteval_t, 1) << 8) +#define L_PTE_XN (_AT(pteval_t, 1) << 9) +#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ + +/* + * These are the memory types, defined to be compatible with + * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB + */ +#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ +#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ +#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ +#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ +#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ +#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ +#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ +#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ +#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ +#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ +#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) + +#endif /* _ASM_PGTABLE_2LEVEL_H */ diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h index fd1521d5cb9d..183111164ce9 100644 --- a/arch/arm/include/asm/pgtable-hwdef.h +++ b/arch/arm/include/asm/pgtable-hwdef.h @@ -10,81 +10,6 @@ #ifndef _ASMARM_PGTABLE_HWDEF_H #define _ASMARM_PGTABLE_HWDEF_H -/* - * Hardware page table definitions. 
- * - * + Level 1 descriptor (PMD) - * - common - */ -#define PMD_TYPE_MASK (3 << 0) -#define PMD_TYPE_FAULT (0 << 0) -#define PMD_TYPE_TABLE (1 << 0) -#define PMD_TYPE_SECT (2 << 0) -#define PMD_BIT4 (1 << 4) -#define PMD_DOMAIN(x) ((x) << 5) -#define PMD_PROTECTION (1 << 9) /* v5 */ -/* - * - section - */ -#define PMD_SECT_BUFFERABLE (1 << 2) -#define PMD_SECT_CACHEABLE (1 << 3) -#define PMD_SECT_XN (1 << 4) /* v6 */ -#define PMD_SECT_AP_WRITE (1 << 10) -#define PMD_SECT_AP_READ (1 << 11) -#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */ -#define PMD_SECT_APX (1 << 15) /* v6 */ -#define PMD_SECT_S (1 << 16) /* v6 */ -#define PMD_SECT_nG (1 << 17) /* v6 */ -#define PMD_SECT_SUPER (1 << 18) /* v6 */ - -#define PMD_SECT_UNCACHED (0) -#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) -#define PMD_SECT_WT (PMD_SECT_CACHEABLE) -#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) -#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) -#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) -#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2)) - -/* - * - coarse table (not used) - */ - -/* - * + Level 2 descriptor (PTE) - * - common - */ -#define PTE_TYPE_MASK (3 << 0) -#define PTE_TYPE_FAULT (0 << 0) -#define PTE_TYPE_LARGE (1 << 0) -#define PTE_TYPE_SMALL (2 << 0) -#define PTE_TYPE_EXT (3 << 0) /* v5 */ -#define PTE_BUFFERABLE (1 << 2) -#define PTE_CACHEABLE (1 << 3) - -/* - * - extended small page/tiny page - */ -#define PTE_EXT_XN (1 << 0) /* v6 */ -#define PTE_EXT_AP_MASK (3 << 4) -#define PTE_EXT_AP0 (1 << 4) -#define PTE_EXT_AP1 (2 << 4) -#define PTE_EXT_AP_UNO_SRO (0 << 4) -#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0) -#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1) -#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0) -#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */ -#define PTE_EXT_APX (1 << 9) /* v6 */ -#define PTE_EXT_COHERENT (1 << 9) /* XScale3 */ -#define PTE_EXT_SHARED (1 << 10) /* v6 */ -#define PTE_EXT_NG (1 << 11) /* v6 */ - -/* - * - small page - */ -#define PTE_SMALL_AP_MASK (0xff << 4) -#define PTE_SMALL_AP_UNO_SRO (0x00 << 4) -#define PTE_SMALL_AP_UNO_SRW (0x55 << 4) -#define PTE_SMALL_AP_URO_SRW (0xaa << 4) -#define PTE_SMALL_AP_URW_SRW (0xff << 4) +#include <asm/pgtable-2level-hwdef.h> #endif diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 5750704e0271..8ade1840c6f2 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -24,6 +24,8 @@ #include <mach/vmalloc.h> #include <asm/pgtable-hwdef.h> +#include <asm/pgtable-2level.h> + /* * Just any arbitrary offset to the start of the vmalloc VM area: the * current 8MB value just means that there will be a 8MB "hole" after the @@ -41,79 +43,6 @@ #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #endif -/* - * Hardware-wise, we have a two level page table structure, where the first - * level has 4096 entries, and the second level has 256 entries. Each entry - * is one 32-bit word. Most of the bits in the second level entry are used - * by hardware, and there aren't any "accessed" and "dirty" bits. - * - * Linux on the other hand has a three level page table structure, which can - * be wrapped to fit a two level page table structure easily - using the PGD - * and PTE only. However, Linux also expects one "PTE" table per page, and - * at least a "dirty" bit. 
- * - * Therefore, we tweak the implementation slightly - we tell Linux that we - * have 2048 entries in the first level, each of which is 8 bytes (iow, two - * hardware pointers to the second level.) The second level contains two - * hardware PTE tables arranged contiguously, preceded by Linux versions - * which contain the state information Linux needs. We, therefore, end up - * with 512 entries in the "PTE" level. - * - * This leads to the page tables having the following layout: - * - * pgd pte - * | | - * +--------+ - * | | +------------+ +0 - * +- - - - + | Linux pt 0 | - * | | +------------+ +1024 - * +--------+ +0 | Linux pt 1 | - * | |-----> +------------+ +2048 - * +- - - - + +4 | h/w pt 0 | - * | |-----> +------------+ +3072 - * +--------+ +8 | h/w pt 1 | - * | | +------------+ +4096 - * - * See L_PTE_xxx below for definitions of bits in the "Linux pt", and - * PTE_xxx for definitions of bits appearing in the "h/w pt". - * - * PMD_xxx definitions refer to bits in the first level page table. - * - * The "dirty" bit is emulated by only granting hardware write permission - * iff the page is marked "writable" and "dirty" in the Linux PTE. This - * means that a write to a clean page will cause a permission fault, and - * the Linux MM layer will mark the page dirty via handle_pte_fault(). - * For the hardware to notice the permission change, the TLB entry must - * be flushed, and ptep_set_access_flags() does that for us. - * - * The "accessed" or "young" bit is emulated by a similar method; we only - * allow accesses to the page if the "young" bit is set. Accesses to the - * page will cause a fault, and handle_pte_fault() will set the young bit - * for us as long as the page is marked present in the corresponding Linux - * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is - * up to date. - * - * However, when the "young" bit is cleared, we deny access to the page - * by clearing the hardware PTE. Currently Linux does not flush the TLB - * for us in this case, which means the TLB will retain the transation - * until either the TLB entry is evicted under pressure, or a context - * switch which changes the user space mapping occurs. - */ -#define PTRS_PER_PTE 512 -#define PTRS_PER_PMD 1 -#define PTRS_PER_PGD 2048 - -#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) -#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) -#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) - -/* - * PMD_SHIFT determines the size of the area a second-level page table can map - * PGDIR_SHIFT determines what a third-level page table entry can map - */ -#define PMD_SHIFT 21 -#define PGDIR_SHIFT 21 - #define LIBRARY_TEXT_START 0x0c000000 #ifndef __ASSEMBLY__ @@ -124,12 +53,6 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte) #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) -#endif /* !__ASSEMBLY__ */ - -#define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) /* * This is the lowest virtual address we can permit any user space @@ -138,60 +61,6 @@ extern void __pgd_error(const char *file, int line, pgd_t); */ #define FIRST_USER_ADDRESS PAGE_SIZE -#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) - -/* - * section address mask and size definitions. 
- */ -#define SECTION_SHIFT 20 -#define SECTION_SIZE (1UL << SECTION_SHIFT) -#define SECTION_MASK (~(SECTION_SIZE-1)) - -/* - * ARMv6 supersection address mask and size definitions. - */ -#define SUPERSECTION_SHIFT 24 -#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT) -#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1)) - -/* - * "Linux" PTE definitions. - * - * We keep two sets of PTEs - the hardware and the linux version. - * This allows greater flexibility in the way we map the Linux bits - * onto the hardware tables, and allows us to have YOUNG and DIRTY - * bits. - * - * The PTE table pointer refers to the hardware entries; the "Linux" - * entries are stored 1024 bytes below. - */ -#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) -#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) -#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ -#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) -#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) -#define L_PTE_USER (_AT(pteval_t, 1) << 8) -#define L_PTE_XN (_AT(pteval_t, 1) << 9) -#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ - -/* - * These are the memory types, defined to be compatible with - * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB - */ -#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ -#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ -#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ -#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ -#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ -#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ -#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ -#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ -#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ -#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ -#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) - -#ifndef __ASSEMBLY__ - /* * The pgprot_* and protection_map entries will be fixed up in runtime * to include the cachable and bufferable bits based on memory policy, @@ -327,10 +196,10 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; static inline pte_t *pmd_page_vaddr(pmd_t pmd) { - return __va(pmd_val(pmd) & PAGE_MASK); + return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); } -#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) +#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) /* we don't need complex calculations here as the pmd is folded into the pgd */ #define pmd_addr_end(addr,end) (end) @@ -351,7 +220,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr)) #define pte_unmap(pte) __pte_unmap(pte) -#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) +#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT) #define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) #define pte_page(pte) pfn_to_page(pte_pfn(pte)) diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 67c70a31a1be..b7e82c4aced6 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -41,7 +41,7 @@ struct arm_pmu_platdata { * encoded error on failure. */ extern struct platform_device * -reserve_pmu(enum arm_pmu_type device); +reserve_pmu(enum arm_pmu_type type); /** * release_pmu() - Relinquish control of the performance counters @@ -62,26 +62,26 @@ release_pmu(enum arm_pmu_type type); * the actual hardware initialisation. 
*/ extern int -init_pmu(enum arm_pmu_type device); +init_pmu(enum arm_pmu_type type); #else /* CONFIG_CPU_HAS_PMU */ #include <linux/err.h> static inline struct platform_device * -reserve_pmu(enum arm_pmu_type device) +reserve_pmu(enum arm_pmu_type type) { return ERR_PTR(-ENODEV); } static inline int -release_pmu(struct platform_device *pdev) +release_pmu(enum arm_pmu_type type) { return -ENODEV; } static inline int -init_pmu(enum arm_pmu_type device) +init_pmu(enum arm_pmu_type type) { return -ENODEV; } diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index e42d96a45d3e..0a17b62538c2 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -33,6 +33,11 @@ extern void show_ipi_list(struct seq_file *, int); asmlinkage void do_IPI(int ipinr, struct pt_regs *regs); /* + * Called from C code, this handles an IPI. + */ +void handle_IPI(int ipinr, struct pt_regs *regs); + +/* * Setup the set of possible CPUs (via set_cpu_possible) */ extern void smp_init_cpus(void); @@ -66,6 +71,12 @@ extern void platform_secondary_init(unsigned int cpu); extern void platform_smp_prepare_cpus(unsigned int); /* + * Logical CPU mapping. + */ +extern int __cpu_logical_map[NR_CPUS]; +#define cpu_logical_map(cpu) __cpu_logical_map[cpu] + +/* * Initial data for bringing up a secondary CPU. */ struct secondary_data { diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 832888d0c20c..ed6b0499a106 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -62,13 +62,6 @@ #include <asm/outercache.h> -#define __exception __attribute__((section(".exception.text"))) -#ifdef CONFIG_FUNCTION_GRAPH_TRACER -#define __exception_irq_entry __irq_entry -#else -#define __exception_irq_entry __exception -#endif - struct thread_info; struct task_struct; diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 8077145698ff..02b2f8203982 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -471,7 +471,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) * these operations. This is typically used when we are removing * PMD entries. 
*/ -static inline void flush_pmd_entry(pmd_t *pmd) +static inline void flush_pmd_entry(void *pmd) { const unsigned int __tlb_flag = __cpu_tlb_flags; @@ -487,7 +487,7 @@ static inline void flush_pmd_entry(pmd_t *pmd) dsb(); } -static inline void clean_pmd_entry(pmd_t *pmd) +static inline void clean_pmd_entry(void *pmd) { const unsigned int __tlb_flag = __cpu_tlb_flags; diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index accbd7cad9b5..a7e457ed27c3 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -1,6 +1,39 @@ #ifndef _ASM_ARM_TOPOLOGY_H #define _ASM_ARM_TOPOLOGY_H +#ifdef CONFIG_ARM_CPU_TOPOLOGY + +#include <linux/cpumask.h> + +struct cputopo_arm { + int thread_id; + int core_id; + int socket_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; +}; + +extern struct cputopo_arm cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) + +#define mc_capable() (cpu_topology[0].socket_id != -1) +#define smt_capable() (cpu_topology[0].thread_id != -1) + +void init_cpu_topology(void); +void store_cpu_topology(unsigned int cpuid); +const struct cpumask *cpu_coregroup_mask(unsigned int cpu); + +#else + +static inline void init_cpu_topology(void) { } +static inline void store_cpu_topology(unsigned int cpuid) { } + +#endif + #include <asm-generic/topology.h> #endif /* _ASM_ARM_TOPOLOGY_H */ diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 2c04ed5efeb5..c60a2944f95b 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -478,8 +478,8 @@ /* * Unimplemented (or alternatively implemented) syscalls */ -#define __IGNORE_fadvise64_64 1 -#define __IGNORE_migrate_pages 1 +#define __IGNORE_fadvise64_64 +#define __IGNORE_migrate_pages #endif /* __KERNEL__ */ #endif /* __ASM_ARM_UNISTD_H */
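
The cputype.h and smp.h hunks above introduce read_cpuid_mpidr() and a logical-to-physical CPU map. A minimal sketch of how boot code might tie the two together; the 0xff Aff0 mask and the example_ function name are assumptions for illustration only, and the sketch assumes an SMP build:

#include <linux/init.h>
#include <linux/smp.h>
#include <asm/cputype.h>

static void __init example_setup_boot_cpu_map(void)
{
	unsigned int mpidr = read_cpuid_mpidr();

	/* Record which physical core the boot CPU (logical 0) came up on. */
	cpu_logical_map(0) = mpidr & 0xff;	/* MPIDR affinity level 0 */
}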
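
The cache-l2x0.h changes widen L2X0_AUX_CTRL_WAY_SIZE_MASK to the full 3-bit field. A hedged sketch of decoding the way size from the auxiliary control register; it assumes the pre-existing L2X0_AUX_CTRL offset from the same header and the PL310 encoding in which 1 means 16KB and each further step doubles, with the reserved value 0 treated as the minimum:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/hardware/cache-l2x0.h>

static unsigned int example_l2x0_way_size_kb(void __iomem *l2x0_base)
{
	u32 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	u32 field = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK)
			>> L2X0_AUX_CTRL_WAY_SIZE_SHIFT;

	/* 1 -> 16KB, 2 -> 32KB, ...; treat the reserved 0 as 16KB. */
	return field ? 16U << (field - 1) : 16U;
}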
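
io.h gains big-endian ioread/iowrite accessors alongside the existing little-endian ones. Illustrative usage only; the device layout and register offsets below are hypothetical:

#include <linux/io.h>
#include <linux/types.h>

static u32 example_read_be_status(void __iomem *regs)
{
	u32 status = ioread32be(regs + 0x10);	/* 32-bit big-endian read */

	iowrite16be(0x1, regs + 0x14);		/* 16-bit big-endian write */
	return status;
}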
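
topology.h adds init_cpu_topology() and store_cpu_topology(), with empty stubs when CONFIG_ARM_CPU_TOPOLOGY is disabled. A sketch of where SMP bring-up code might call them; the wrapper function names are invented for the example:

#include <linux/topology.h>

static void example_boot_cpu_init(void)
{
	init_cpu_topology();		/* reset cpu_topology[] before secondary CPUs come up */
}

static void example_secondary_started(unsigned int cpu)
{
	store_cpu_topology(cpu);	/* fill core/socket ids for this CPU from the hardware */
}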