Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig              |  15
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  |  49
-rw-r--r--  arch/arm/mm/cache-tauros2.c      |  29
-rw-r--r--  arch/arm/mm/dma-mapping.c        | 147
-rw-r--r--  arch/arm/mm/dump.c               |  47
-rw-r--r--  arch/arm/mm/init.c               |   2
-rw-r--r--  arch/arm/mm/mmu.c                |  10
-rw-r--r--  arch/arm/mm/proc-macros.S        |  19
-rw-r--r--  arch/arm/mm/proc-v7-2level.S     |   7
-rw-r--r--  arch/arm/mm/proc-v7.S            |  11
-rw-r--r--  arch/arm/mm/proc-v7m.S           |   8
11 files changed, 235 insertions(+), 109 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 1f8fed94c2a4..5bf7c3c3b301 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -264,7 +264,7 @@ config CPU_ARM1026
# SA110
config CPU_SA110
- bool "Support StrongARM(R) SA-110 processor" if ARCH_RPC
+ bool
select CPU_32v3 if ARCH_RPC
select CPU_32v4 if !ARCH_RPC
select CPU_ABRT_EV4
@@ -420,33 +420,32 @@ config CPU_32v3
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
- select TLS_REG_EMUL if SMP || !MMU
select NEED_KUSER_HELPERS
+ select TLS_REG_EMUL if SMP || !MMU
config CPU_32v4
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
- select TLS_REG_EMUL if SMP || !MMU
select NEED_KUSER_HELPERS
+ select TLS_REG_EMUL if SMP || !MMU
config CPU_32v4T
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
- select TLS_REG_EMUL if SMP || !MMU
select NEED_KUSER_HELPERS
+ select TLS_REG_EMUL if SMP || !MMU
config CPU_32v5
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
- select TLS_REG_EMUL if SMP || !MMU
select NEED_KUSER_HELPERS
+ select TLS_REG_EMUL if SMP || !MMU
config CPU_32v6
bool
- select CPU_USE_DOMAINS if CPU_V6 && MMU
select TLS_REG_EMUL if !CPU_32v6K && !MMU
config CPU_32v6K
@@ -671,7 +670,7 @@ config ARM_VIRT_EXT
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
- depends on !CPU_USE_DOMAINS && CPU_V7
+ depends on CPU_V7
default y if SMP
select HAVE_PROC_CPU if PROC_FS
help
@@ -855,7 +854,7 @@ config OUTER_CACHE_SYNC
config CACHE_FEROCEON_L2
bool "Enable the Feroceon L2 cache controller"
- depends on ARCH_KIRKWOOD || ARCH_MV78XX0
+ depends on ARCH_KIRKWOOD || ARCH_MV78XX0 || ARCH_MVEBU
default y
select OUTER_CACHE
help
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 48bc3c0a87ce..dc814a548056 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,10 +13,15 @@
*/
#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/highmem.h>
+#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
-#include <plat/cache-feroceon-l2.h>
+#include <asm/hardware/cache-feroceon-l2.h>
+
+#define L2_WRITETHROUGH_KIRKWOOD BIT(4)
/*
* Low-level cache maintenance operations.
@@ -331,7 +336,9 @@ static void __init enable_l2(void)
enable_icache();
if (d)
enable_dcache();
- }
+ } else
+ pr_err(FW_BUG
+ "Feroceon L2: bootloader left the L2 cache on!\n");
}
void __init feroceon_l2_init(int __l2_wt_override)
@@ -350,3 +357,41 @@ void __init feroceon_l2_init(int __l2_wt_override)
printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
l2_wt_override ? ", in WT override mode" : "");
}
+#ifdef CONFIG_OF
+static const struct of_device_id feroceon_ids[] __initconst = {
+ { .compatible = "marvell,kirkwood-cache"},
+ { .compatible = "marvell,feroceon-cache"},
+ {}
+};
+
+int __init feroceon_of_init(void)
+{
+ struct device_node *node;
+ void __iomem *base;
+ bool l2_wt_override = false;
+ struct resource res;
+
+#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
+ l2_wt_override = true;
+#endif
+
+ node = of_find_matching_node(NULL, feroceon_ids);
+ if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
+ if (of_address_to_resource(node, 0, &res))
+ return -ENODEV;
+
+ base = ioremap(res.start, resource_size(&res));
+ if (!base)
+ return -ENOMEM;
+
+ if (l2_wt_override)
+ writel(readl(base) | L2_WRITETHROUGH_KIRKWOOD, base);
+ else
+ writel(readl(base) & ~L2_WRITETHROUGH_KIRKWOOD, base);
+ }
+
+ feroceon_l2_init(l2_wt_override);
+
+ return 0;
+}
+#endif
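
With the OF probe above in place, a DT-based board can initialise the L2 by
calling feroceon_of_init() and let it locate the compatible node and pick the
write-through policy itself. A minimal caller sketch, assuming
CONFIG_CACHE_FEROCEON_L2 is enabled; the wrapper name example_l2_init() is
illustrative and not part of this diff:

	#include <asm/hardware/cache-feroceon-l2.h>

	/* Hypothetical board-init hook: defer everything to the OF probe. */
	static void __init example_l2_init(void)
	{
	#ifdef CONFIG_CACHE_FEROCEON_L2
		feroceon_of_init();
	#endif
	}
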
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index 1be0f4e5e6eb..b273739e6359 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -33,7 +33,7 @@
* outer cache operations into the kernel image if the kernel has been
* configured to support a pre-v7 CPU.
*/
-#if __LINUX_ARM_ARCH__ < 7
+#ifdef CONFIG_CPU_32v5
/*
* Low-level cache maintenance operations.
*/
@@ -229,33 +229,6 @@ static void __init tauros2_internal_init(unsigned int features)
}
#endif
-#ifdef CONFIG_CPU_32v6
- /*
- * Check whether this CPU lacks support for the v7 hierarchical
- * cache ops. (PJ4 is in its v6 personality mode if the MMFR3
- * register indicates no support for the v7 hierarchical cache
- * ops.)
- */
- if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
- /*
- * When Tauros2 is used in an ARMv6 system, the L2
- * enable bit is in the ARMv6 ARM-mandated position
- * (bit [26] of the System Control Register).
- */
- if (!(get_cr() & 0x04000000)) {
- printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
- adjust_cr(0x04000000, 0x04000000);
- }
-
- mode = "ARMv6";
- outer_cache.inv_range = tauros2_inv_range;
- outer_cache.clean_range = tauros2_clean_range;
- outer_cache.flush_range = tauros2_flush_range;
- outer_cache.disable = tauros2_disable;
- outer_cache.resume = tauros2_resume;
- }
-#endif
-
#ifdef CONFIG_CPU_32v7
/*
* Check whether this CPU has support for the v7 hierarchical
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 11b3914660d2..6b00be1f971e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -284,9 +284,6 @@ static void __dma_free_buffer(struct page *page, size_t size)
}
#ifdef CONFIG_MMU
-#ifdef CONFIG_HUGETLB_PAGE
-#warning ARM Coherent DMA allocator does not (yet) support huge TLB
-#endif
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
@@ -1069,6 +1066,8 @@ fs_initcall(dma_debug_do_init);
/* IOMMU */
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
size_t size)
{
@@ -1076,41 +1075,87 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
unsigned int align = 0;
unsigned int count, start;
unsigned long flags;
+ dma_addr_t iova;
+ int i;
if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
- count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
- (1 << mapping->order) - 1) >> mapping->order;
-
- if (order > mapping->order)
- align = (1 << (order - mapping->order)) - 1;
+ count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ align = (1 << order) - 1;
spin_lock_irqsave(&mapping->lock, flags);
- start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
- count, align);
- if (start > mapping->bits) {
- spin_unlock_irqrestore(&mapping->lock, flags);
- return DMA_ERROR_CODE;
+ for (i = 0; i < mapping->nr_bitmaps; i++) {
+ start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+ mapping->bits, 0, count, align);
+
+ if (start > mapping->bits)
+ continue;
+
+ bitmap_set(mapping->bitmaps[i], start, count);
+ break;
}
- bitmap_set(mapping->bitmap, start, count);
+ /*
+ * No unused range found. Try to extend the existing mapping
+ * and perform a second attempt to reserve an IO virtual
+ * address range of size bytes.
+ */
+ if (i == mapping->nr_bitmaps) {
+ if (extend_iommu_mapping(mapping)) {
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+ mapping->bits, 0, count, align);
+
+ if (start > mapping->bits) {
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ bitmap_set(mapping->bitmaps[i], start, count);
+ }
spin_unlock_irqrestore(&mapping->lock, flags);
- return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+ iova = mapping->base + (mapping->size * i);
+ iova += start << PAGE_SHIFT;
+
+ return iova;
}
static inline void __free_iova(struct dma_iommu_mapping *mapping,
dma_addr_t addr, size_t size)
{
- unsigned int start = (addr - mapping->base) >>
- (mapping->order + PAGE_SHIFT);
- unsigned int count = ((size >> PAGE_SHIFT) +
- (1 << mapping->order) - 1) >> mapping->order;
+ unsigned int start, count;
unsigned long flags;
+ dma_addr_t bitmap_base;
+ u32 bitmap_index;
+
+ if (!size)
+ return;
+
+ bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+ BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+ bitmap_base = mapping->base + mapping->size * bitmap_index;
+
+ start = (addr - bitmap_base) >> PAGE_SHIFT;
+
+ if (addr + size > bitmap_base + mapping->size) {
+ /*
+ * The address range to be freed reaches into the iova
+ * range of the next bitmap. This should not happen as
+ * we don't allow this in __alloc_iova (at the
+ * moment).
+ */
+ BUG();
+ } else
+ count = size >> PAGE_SHIFT;
spin_lock_irqsave(&mapping->lock, flags);
- bitmap_clear(mapping->bitmap, start, count);
+ bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
spin_unlock_irqrestore(&mapping->lock, flags);
}
@@ -1875,8 +1920,7 @@ struct dma_map_ops iommu_coherent_ops = {
* arm_iommu_create_mapping
* @bus: pointer to the bus holding the client device (for IOMMU calls)
* @base: start address of the valid IO address space
- * @size: size of the valid IO address space
- * @order: accuracy of the IO addresses allocations
+ * @size: maximum size of the valid IO address space
*
* Creates a mapping structure which holds information about used/unused
* IO address ranges, which is required to perform memory allocation and
@@ -1886,38 +1930,54 @@ struct dma_map_ops iommu_coherent_ops = {
* arm_iommu_attach_device function.
*/
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
- int order)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
{
- unsigned int count = size >> (PAGE_SHIFT + order);
- unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ unsigned int bits = size >> PAGE_SHIFT;
+ unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
struct dma_iommu_mapping *mapping;
+ int extensions = 1;
int err = -ENOMEM;
- if (!count)
+ if (!bitmap_size)
return ERR_PTR(-EINVAL);
+ if (bitmap_size > PAGE_SIZE) {
+ extensions = bitmap_size / PAGE_SIZE;
+ bitmap_size = PAGE_SIZE;
+ }
+
mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
if (!mapping)
goto err;
- mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!mapping->bitmap)
+ mapping->bitmap_size = bitmap_size;
+ mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
+ GFP_KERNEL);
+ if (!mapping->bitmaps)
goto err2;
+ mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mapping->bitmaps[0])
+ goto err3;
+
+ mapping->nr_bitmaps = 1;
+ mapping->extensions = extensions;
mapping->base = base;
mapping->bits = BITS_PER_BYTE * bitmap_size;
- mapping->order = order;
+ mapping->size = mapping->bits << PAGE_SHIFT;
+
spin_lock_init(&mapping->lock);
mapping->domain = iommu_domain_alloc(bus);
if (!mapping->domain)
- goto err3;
+ goto err4;
kref_init(&mapping->kref);
return mapping;
+err4:
+ kfree(mapping->bitmaps[0]);
err3:
- kfree(mapping->bitmap);
+ kfree(mapping->bitmaps);
err2:
kfree(mapping);
err:
@@ -1927,14 +1987,35 @@ EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
static void release_iommu_mapping(struct kref *kref)
{
+ int i;
struct dma_iommu_mapping *mapping =
container_of(kref, struct dma_iommu_mapping, kref);
iommu_domain_free(mapping->domain);
- kfree(mapping->bitmap);
+ for (i = 0; i < mapping->nr_bitmaps; i++)
+ kfree(mapping->bitmaps[i]);
+ kfree(mapping->bitmaps);
kfree(mapping);
}
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+ int next_bitmap;
+
+ if (mapping->nr_bitmaps >= mapping->extensions)
+ return -EINVAL;
+
+ next_bitmap = mapping->nr_bitmaps;
+ mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+ GFP_ATOMIC);
+ if (!mapping->bitmaps[next_bitmap])
+ return -ENOMEM;
+
+ mapping->nr_bitmaps++;
+
+ return 0;
+}
+
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
if (mapping)
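
Since the allocator now grows the IOVA space one page-sized bitmap at a time,
callers pass the full size of the IO address space and the old "order"
argument disappears. A usage sketch against the new signature; the base
address 0x80000000 and SZ_1G size here are illustrative assumptions:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/sizes.h>
	#include <asm/dma-iommu.h>

	static int example_iommu_setup(struct device *dev)
	{
		struct dma_iommu_mapping *mapping;

		/* One bitmap is allocated up front; further bitmaps are
		 * added on demand by __alloc_iova(), up to 'size' total. */
		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   0x80000000, SZ_1G);
		if (IS_ERR(mapping))
			return PTR_ERR(mapping);

		return arm_iommu_attach_device(dev, mapping);
	}
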
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index ef69152f9b52..c508f41a43bc 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -120,34 +120,51 @@ static const struct prot_bits pte_bits[] = {
};
static const struct prot_bits section_bits[] = {
-#ifndef CONFIG_ARM_LPAE
- /* These are approximate */
+#ifdef CONFIG_ARM_LPAE
+ {
+ .mask = PMD_SECT_USER,
+ .val = PMD_SECT_USER,
+ .set = "USR",
+ }, {
+ .mask = PMD_SECT_RDONLY,
+ .val = PMD_SECT_RDONLY,
+ .set = "ro",
+ .clear = "RW",
+#elif __LINUX_ARM_ARCH__ >= 6
{
- .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
- .val = 0,
+ .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
.set = " ro",
}, {
- .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_WRITE,
.set = " RW",
}, {
- .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ,
.set = "USR ro",
}, {
- .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.set = "USR RW",
-#else
+#else /* ARMv4/ARMv5 */
+ /* These are approximate */
{
- .mask = PMD_SECT_USER,
- .val = PMD_SECT_USER,
- .set = "USR",
+ .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = 0,
+ .set = " ro",
}, {
- .mask = PMD_SECT_RDONLY,
- .val = PMD_SECT_RDONLY,
- .set = "ro",
- .clear = "RW",
+ .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = PMD_SECT_AP_WRITE,
+ .set = " RW",
+ }, {
+ .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = PMD_SECT_AP_READ,
+ .set = "USR ro",
+ }, {
+ .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .set = "USR RW",
#endif
}, {
.mask = PMD_SECT_XN,
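
The ARMv6+ rows above decode a section's permissions from APX together with
the two AP bits. The same decoding as a stand-alone, runnable cross-check
(bit positions as in pgtable-2level-hwdef.h; the helper name is illustrative):

	#include <stdio.h>

	#define PMD_SECT_AP_WRITE (1 << 10)
	#define PMD_SECT_AP_READ  (1 << 11)
	#define PMD_SECT_APX      (1 << 15)

	static const char *decode_section_ap(unsigned int pmd)
	{
		switch (pmd & (PMD_SECT_APX | PMD_SECT_AP_READ |
			       PMD_SECT_AP_WRITE)) {
		case PMD_SECT_APX | PMD_SECT_AP_WRITE:
			return "    ro";	/* kernel r/o */
		case PMD_SECT_AP_WRITE:
			return "    RW";	/* kernel r/w */
		case PMD_SECT_AP_READ:
			return "USR ro";
		case PMD_SECT_AP_READ | PMD_SECT_AP_WRITE:
			return "USR RW";
		default:
			return "??????";	/* no access  */
		}
	}

	int main(void)
	{
		printf("%s\n", decode_section_ap(PMD_SECT_AP_WRITE));
		return 0;
	}
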
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 804d61566a53..2a77ba8796ae 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -323,6 +323,8 @@ void __init arm_memblock_init(struct meminfo *mi,
if (mdesc->reserve)
mdesc->reserve();
+ early_init_fdt_scan_reserved_mem();
+
/*
* reserve memory for DMA contiguous allocations,
* must come from DMA area inside low memory
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a623cb3ad012..b68c6b22e1c8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -516,6 +516,16 @@ static void __init build_mem_type_table(void)
s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
/*
+ * We don't use domains on ARMv6 (since this causes problems with
+ * v6/v7 kernels), so we must use a separate memory type for user
+ * r/o, kernel r/w to map the vectors page.
+ */
+#ifndef CONFIG_ARM_LPAE
+ if (cpu_arch == CPU_ARCH_ARMv6)
+ vecs_pgprot |= L_PTE_MT_VECTORS;
+#endif
+
+ /*
* ARMv6 and above have extended page tables.
*/
if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index e3c48a3fe063..ee1d80593958 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -112,13 +112,9 @@
* 100x 1 0 1 r/o no acc
* 10x0 1 0 1 r/o no acc
* 1011 0 0 1 r/w no acc
- * 110x 0 1 0 r/w r/o
- * 11x0 0 1 0 r/w r/o
- * 1111 0 1 1 r/w r/w
- *
- * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
* 110x 1 1 1 r/o r/o
* 11x0 1 1 1 r/o r/o
+ * 1111 0 1 1 r/w r/w
*/
.macro armv6_mt_table pfx
\pfx\()_mt_table:
@@ -137,7 +133,7 @@
.long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
.long 0x00 @ unused
.long 0x00 @ unused
- .long 0x00 @ unused
+ .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS
.endm
.macro armv6_set_pte_ext pfx
@@ -158,24 +154,21 @@
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
-#ifdef CONFIG_CPU_USE_DOMAINS
- @ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-#endif
+
+ @ user read-only -> kernel read-only
+ bicne r3, r3, #PTE_EXT_AP0
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
- orr r3, r3, r2
+ eor r3, r3, r2
tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_PRESENT
moveq r3, #0
-#ifndef CONFIG_CPU_USE_DOMAINS
tstne r1, #L_PTE_NONE
movne r3, #0
-#endif
str r3, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
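
The orr-to-eor change is what makes the new L_PTE_MT_VECTORS entry work: its
mt_table value carries PTE_EXT_APX, so the exclusive-or toggles APX back off
after the "user read-only" path set it, leaving the vectors page user r/o but
kernel r/w. A worked model of the permission-bit sequence, runnable in user
space (bit values as in pgtable-2level-hwdef.h; the cacheable/bufferable bits
of the table entry are omitted for clarity):

	#include <stdio.h>

	#define PTE_EXT_AP0 (1 << 4)
	#define PTE_EXT_AP1 (2 << 4)
	#define PTE_EXT_APX (1 << 9)

	int main(void)
	{
		/* Young, present, user, read-only page of type
		 * L_PTE_MT_VECTORS, following armv6_set_pte_ext: */
		unsigned int hw = PTE_EXT_AP0;	/* base: AP0 set          */
		hw |= PTE_EXT_APX;		/* read-only (or clean)   */
		hw |= PTE_EXT_AP1;		/* L_PTE_USER             */
		if (hw & PTE_EXT_APX)		/* user r/o -> kernel r/o */
			hw &= ~PTE_EXT_AP0;
		hw ^= PTE_EXT_APX;		/* table entry carries APX,
						   so the eor clears it   */

		/* Prints APX=0 AP=10: kernel r/w, user r/o. */
		printf("APX=%u AP=%u%u\n", !!(hw & PTE_EXT_APX),
		       !!(hw & PTE_EXT_AP1), !!(hw & PTE_EXT_AP0));
		return 0;
	}
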
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index bdd3be4be77a..1f52915f2b28 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -90,21 +90,14 @@ ENTRY(cpu_v7_set_pte_ext)
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
-#ifdef CONFIG_CPU_USE_DOMAINS
- @ allow kernel read/write access to read-only user pages
- tstne r3, #PTE_EXT_APX
- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-#endif
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_VALID
-#ifndef CONFIG_CPU_USE_DOMAINS
eorne r1, r1, #L_PTE_NONE
tstne r1, #L_PTE_NONE
-#endif
moveq r3, #0
ARM( str r3, [r0, #2048]! )
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 74f6033e76dd..195731d3813b 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -192,6 +192,7 @@ __v7_cr7mp_setup:
mov r10, #(1 << 0) @ Cache/TLB ops broadcasting
b 1f
__v7_ca7mp_setup:
+__v7_ca12mp_setup:
__v7_ca15mp_setup:
mov r10, #0
1:
@@ -484,6 +485,16 @@ __v7_ca7mp_proc_info:
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
+ * ARM Ltd. Cortex A12 processor.
+ */
+ .type __v7_ca12mp_proc_info, #object
+__v7_ca12mp_proc_info:
+ .long 0x410fc0d0
+ .long 0xff0ffff0
+ __v7_proc __v7_ca12mp_setup
+ .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
+
+ /*
* ARM Ltd. Cortex A15 processor.
*/
.type __v7_ca15mp_proc_info, #object
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 0c93588fcb91..1ca37c72f12f 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -123,6 +123,11 @@ __v7m_setup:
mov pc, lr
ENDPROC(__v7m_setup)
+ .align 2
+__v7m_setup_stack:
+ .space 4 * 8 @ 8 registers
+__v7m_setup_stack_top:
+
define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
@@ -152,6 +157,3 @@ __v7m_proc_info:
.long nop_cache_fns @ proc_info_list.cache
.size __v7m_proc_info, . - __v7m_proc_info
-__v7m_setup_stack:
- .space 4 * 8 @ 8 registers
-__v7m_setup_stack_top: