author     Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 14:04:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 14:04:24 -0700
commit     9840cfcb97fc8b6aa7b36cec3cc3fd763f14052e (patch)
tree       20c1702dcfbc9874fe64a23ec01c0168ea33711a /arch/arm64/mm
parent     17cfb9d332243f6d2551818df27d666d3dd70a23 (diff)
parent     3d1bf78c7b13a0ad88e77dce94df88400f47e0dd (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "There's a reasonable amount here and the juicy details are all below.

  It's worth noting that the MTE/KASAN changes strayed outside of our
  usual directories due to core mm changes and some associated changes
  to some other architectures; Andrew asked for us to carry these [1]
  rather than take them via the -mm tree.

  Summary:

   - Optimise SVE switching for CPUs with 128-bit implementations.
   - Fix output format from SVE selftest.
   - Add support for versions 1.2 and 1.3 of the SMC calling convention.
   - Allow Pointer Authentication to be configured independently for
     kernel and userspace.
   - PMU driver cleanups for managing IRQ affinity and exposing event
     attributes via sysfs.
   - KASAN optimisations for both hardware tagging (MTE) and out-of-line
     software tagging implementations.
   - Relax frame record alignment requirements to facilitate 8-byte
     alignment with KASAN and Clang.
   - Cleanup of page-table definitions and removal of unused memory types.
   - Reduction of ARCH_DMA_MINALIGN back to 64 bytes.
   - Refactoring of our instruction decoding routines and addition of
     some missing encodings.
   - Move the entry code into C and harden it against harmful compiler
     instrumentation.
   - Update booting requirements for the FEAT_HCX feature, added to v8.7
     of the architecture.
   - Fix resume from idle when pNMI is being used.
   - Additional CPU sanity checks for MTE and preparatory changes for
     systems where not all of the CPUs support 32-bit EL0.
   - Update our kernel string routines to the latest Cortex Strings
     implementation.
   - Big cleanup of our cache maintenance routines, which were
     confusingly named and inconsistent in their implementations.
   - Tweak linker flags so that GDB can understand vmlinux when using
     RELR relocations.
   - Boot path cleanups to enable early initialisation of per-cpu
     operations needed by KCSAN.
   - Non-critical fixes and miscellaneous cleanup"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (150 commits)
  arm64: tlb: fix the TTL value of tlb_get_level
  arm64: Restrict undef hook for cpufeature registers
  arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS
  arm64: insn: avoid circular include dependency
  arm64: smp: Bump debugging information print down to KERN_DEBUG
  drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe()
  perf/arm-cmn: Fix invalid pointer when access dtc object sharing the same IRQ number
  arm64: suspend: Use cpuidle context helpers in cpu_suspend()
  PSCI: Use cpuidle context helpers in psci_cpu_suspend_enter()
  arm64: Convert cpu_do_idle() to using cpuidle context helpers
  arm64: Add cpuidle context save/restore helpers
  arm64: head: fix code comments in set_cpu_boot_mode_flag
  arm64: mm: drop unused __pa(__idmap_text_start)
  arm64: mm: fix the count comments in compute_indices
  arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan
  arm64: mm: Pass original fault address to handle_mm_fault()
  arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK]
  arm64/mm: Use CONT_PMD_SHIFT for ARM64_MEMSTART_SHIFT
  arm64/mm: Drop SWAPPER_INIT_MAP_SIZE
  arm64: Conditionally configure PTR_AUTH key of the kernel.
  ...
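As a rough sketch of what the cache maintenance cleanup means for callers: the routines in arch/arm64/mm/cache.S are renamed to spell out the operation and point of coherency, and move from (kaddr, size) arguments to (start, end) arguments, as the diff below shows. The function here is a hypothetical call site (the buffer and size are placeholders, not taken from this diff), assuming the declarations remain available via asm/cacheflush.h:

    #include <linux/types.h>
    #include <asm/cacheflush.h>

    static void clean_and_invalidate_to_poc(void *buf, size_t size)
    {
        /* old interface: __flush_dcache_area(buf, size); */
        /* new interface: operation + point of coherency in the name,
         * virtual start/end addresses as arguments */
        dcache_clean_inval_poc((unsigned long)buf, (unsigned long)buf + size);
    }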
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/cache.S    158
-rw-r--r--  arch/arm64/mm/context.c    6
-rw-r--r--  arch/arm64/mm/fault.c     50
-rw-r--r--  arch/arm64/mm/flush.c     29
-rw-r--r--  arch/arm64/mm/init.c       7
-rw-r--r--  arch/arm64/mm/mmu.c       13
-rw-r--r--  arch/arm64/mm/proc.S      24
-rw-r--r--  arch/arm64/mm/ptdump.c     4
8 files changed, 157 insertions, 134 deletions
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2d881f34dd9d..5051b3c1a4f1 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -15,7 +15,7 @@
#include <asm/asm-uaccess.h>
/*
- * flush_icache_range(start,end)
+ * caches_clean_inval_pou_macro(start,end) [fixup]
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -23,12 +23,27 @@
*
* - start - virtual start address of region
* - end - virtual end address of region
+ * - fixup - optional label to branch to on user fault
*/
-SYM_FUNC_START(__flush_icache_range)
- /* FALLTHROUGH */
+.macro caches_clean_inval_pou_macro, fixup
+alternative_if ARM64_HAS_CACHE_IDC
+ dsb ishst
+ b .Ldc_skip_\@
+alternative_else_nop_endif
+ mov x2, x0
+ mov x3, x1
+ dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
+.Ldc_skip_\@:
+alternative_if ARM64_HAS_CACHE_DIC
+ isb
+ b .Lic_skip_\@
+alternative_else_nop_endif
+ invalidate_icache_by_line x0, x1, x2, x3, \fixup
+.Lic_skip_\@:
+.endm
/*
- * __flush_cache_user_range(start,end)
+ * caches_clean_inval_pou(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -37,117 +52,103 @@ SYM_FUNC_START(__flush_icache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-SYM_FUNC_START(__flush_cache_user_range)
+SYM_FUNC_START(caches_clean_inval_pou)
+ caches_clean_inval_pou_macro
+ ret
+SYM_FUNC_END(caches_clean_inval_pou)
+
+/*
+ * caches_clean_inval_user_pou(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified region.
+ * This is typically used when code has been written to a memory region,
+ * and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+SYM_FUNC_START(caches_clean_inval_user_pou)
uaccess_ttbr0_enable x2, x3, x4
-alternative_if ARM64_HAS_CACHE_IDC
- dsb ishst
- b 7f
-alternative_else_nop_endif
- dcache_line_size x2, x3
- sub x3, x2, #1
- bic x4, x0, x3
-1:
-user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
- add x4, x4, x2
- cmp x4, x1
- b.lo 1b
- dsb ish
-7:
-alternative_if ARM64_HAS_CACHE_DIC
- isb
- b 8f
-alternative_else_nop_endif
- invalidate_icache_by_line x0, x1, x2, x3, 9f
-8: mov x0, #0
+ caches_clean_inval_pou_macro 2f
+ mov x0, xzr
1:
uaccess_ttbr0_disable x1, x2
ret
-9:
+2:
mov x0, #-EFAULT
b 1b
-SYM_FUNC_END(__flush_icache_range)
-SYM_FUNC_END(__flush_cache_user_range)
+SYM_FUNC_END(caches_clean_inval_user_pou)
/*
- * invalidate_icache_range(start,end)
+ * icache_inval_pou(start,end)
*
* Ensure that the I cache is invalid within specified region.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
-SYM_FUNC_START(invalidate_icache_range)
+SYM_FUNC_START(icache_inval_pou)
alternative_if ARM64_HAS_CACHE_DIC
- mov x0, xzr
isb
ret
alternative_else_nop_endif
- uaccess_ttbr0_enable x2, x3, x4
-
- invalidate_icache_by_line x0, x1, x2, x3, 2f
- mov x0, xzr
-1:
- uaccess_ttbr0_disable x1, x2
+ invalidate_icache_by_line x0, x1, x2, x3
ret
-2:
- mov x0, #-EFAULT
- b 1b
-SYM_FUNC_END(invalidate_icache_range)
+SYM_FUNC_END(icache_inval_pou)
/*
- * __flush_dcache_area(kaddr, size)
+ * dcache_clean_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned and invalidated to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
/*
- * __clean_dcache_area_pou(kaddr, size)
+ * dcache_clean_pou(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoU.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START(__clean_dcache_area_pou)
+SYM_FUNC_START(dcache_clean_pou)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
ret
alternative_else_nop_endif
dcache_by_line_op cvau, ish, x0, x1, x2, x3
ret
-SYM_FUNC_END(__clean_dcache_area_pou)
+SYM_FUNC_END(dcache_clean_pou)
/*
- * __inval_dcache_area(kaddr, size)
+ * dcache_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are invalidated. Any partial lines at the ends of the interval are
* also cleaned to PoC to prevent data loss.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - kernel start address of region
+ * - end - kernel end address of region
*/
SYM_FUNC_START_LOCAL(__dma_inv_area)
-SYM_FUNC_START_PI(__inval_dcache_area)
+SYM_FUNC_START_PI(dcache_inval_poc)
/* FALLTHROUGH */
/*
- * __dma_inv_area(start, size)
+ * __dma_inv_area(start, end)
* - start - virtual start address of region
- * - size - size in question
+ * - end - virtual end address of region
*/
- add x1, x1, x0
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -165,48 +166,48 @@ SYM_FUNC_START_PI(__inval_dcache_area)
b.lo 2b
dsb sy
ret
-SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END_PI(dcache_inval_poc)
SYM_FUNC_END(__dma_inv_area)
/*
- * __clean_dcache_area_poc(kaddr, size)
+ * dcache_clean_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
SYM_FUNC_START_LOCAL(__dma_clean_area)
-SYM_FUNC_START_PI(__clean_dcache_area_poc)
+SYM_FUNC_START_PI(dcache_clean_poc)
/* FALLTHROUGH */
/*
- * __dma_clean_area(start, size)
+ * __dma_clean_area(start, end)
* - start - virtual start address of region
- * - size - size in question
+ * - end - virtual end address of region
*/
dcache_by_line_op cvac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END_PI(dcache_clean_poc)
SYM_FUNC_END(__dma_clean_area)
/*
- * __clean_dcache_area_pop(kaddr, size)
+ * dcache_clean_pop(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoP.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START_PI(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(dcache_clean_pop)
alternative_if_not ARM64_HAS_DCPOP
- b __clean_dcache_area_poc
+ b dcache_clean_poc
alternative_else_nop_endif
dcache_by_line_op cvap, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(dcache_clean_pop)
/*
* __dma_flush_area(start, size)
@@ -217,6 +218,7 @@ SYM_FUNC_END_PI(__clean_dcache_area_pop)
* - size - size in question
*/
SYM_FUNC_START_PI(__dma_flush_area)
+ add x1, x0, x1
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__dma_flush_area)
@@ -228,6 +230,7 @@ SYM_FUNC_END_PI(__dma_flush_area)
* - dir - DMA direction
*/
SYM_FUNC_START_PI(__dma_map_area)
+ add x1, x0, x1
cmp w2, #DMA_FROM_DEVICE
b.eq __dma_inv_area
b __dma_clean_area
@@ -240,6 +243,7 @@ SYM_FUNC_END_PI(__dma_map_area)
* - dir - DMA direction
*/
SYM_FUNC_START_PI(__dma_unmap_area)
+ add x1, x0, x1
cmp w2, #DMA_TO_DEVICE
b.ne __dma_inv_area
ret
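One behavioural detail from the cache.S hunk above: the user-space variant caches_clean_inval_user_pou() returns 0 on success and -EFAULT when the fixup label (2: above) is reached on a faulting user access. A hypothetical caller sketch, not taken from this diff, assuming the routine is exposed to C with an int return:

    static int sync_user_icache_range(unsigned long start, unsigned long end)
    {
        int ret = caches_clean_inval_user_pou(start, end);

        /* -EFAULT here means the user mapping faulted during maintenance */
        if (ret)
            return ret;

        return 0;
    }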
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 001737a8f309..cd72576ae2b7 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -402,14 +402,12 @@ static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
- asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
- GFP_KERNEL);
+ asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
if (!asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
- pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
- sizeof(*pinned_asid_map), GFP_KERNEL);
+ pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
nr_pinned_asids = 0;
/*
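The context.c hunk above replaces an open-coded kcalloc() of BITS_TO_LONGS() words with bitmap_zalloc(), which allocates a zeroed bitmap sized in bits rather than longs. A minimal sketch of the pattern (nbits stands in for NUM_USER_ASIDS; the function itself is illustrative, not from this diff):

    #include <linux/bitmap.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static int example_alloc_bitmap(unsigned int nbits)
    {
        unsigned long *map;

        /* previously: kcalloc(BITS_TO_LONGS(nbits), sizeof(*map), GFP_KERNEL) */
        map = bitmap_zalloc(nbits, GFP_KERNEL);
        if (!map)
            return -ENOMEM;

        bitmap_free(map);    /* counterpart to bitmap_zalloc() */
        return 0;
    }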
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 871c82ab0a30..349c488765ca 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -99,6 +99,8 @@ static void mem_abort_decode(unsigned int esr)
pr_alert(" EA = %lu, S1PTW = %lu\n",
(esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
(esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
+ pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC),
+ esr_to_fault_info(esr)->name);
if (esr_is_data_abort(esr))
data_abort_decode(esr);
@@ -232,13 +234,17 @@ static bool is_el1_instruction_abort(unsigned int esr)
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
+static bool is_el1_data_abort(unsigned int esr)
+{
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
+}
+
static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
- if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+ if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
return false;
if (fsc_type == ESR_ELx_FSC_PERM)
@@ -258,7 +264,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
unsigned long flags;
u64 par, dfsc;
- if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
+ if (!is_el1_data_abort(esr) ||
(esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
return false;
@@ -346,10 +352,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
{
- unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc = esr & ESR_ELx_FSC;
- if (ec != ESR_ELx_EC_DABT_CUR)
+ if (!is_el1_data_abort(esr))
return false;
if (fsc == ESR_ELx_FSC_MTE)
@@ -504,7 +509,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
*/
if (!(vma->vm_flags & vm_flags))
return VM_FAULT_BADACCESS;
- return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
+ return handle_mm_fault(vma, addr, mm_flags, regs);
}
static bool is_el0_instruction_abort(unsigned int esr)
@@ -836,13 +841,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_mem_abort);
-void do_el0_irq_bp_hardening(void)
-{
- /* PC has already been checked in entry.S */
- arm64_apply_bp_hardening();
-}
-NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
-
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
@@ -921,3 +919,29 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);
+
+/*
+ * Used during anonymous page fault handling.
+ */
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
+
+ /*
+ * If the page is mapped with PROT_MTE, initialise the tags at the
+ * point of allocation and page zeroing as this is usually faster than
+ * separate DC ZVA and STGM.
+ */
+ if (vma->vm_flags & VM_MTE)
+ flags |= __GFP_ZEROTAGS;
+
+ return alloc_page_vma(flags, vma, vaddr);
+}
+
+void tag_clear_highpage(struct page *page)
+{
+ mte_zero_clear_page_tags(page_address(page));
+ page_kasan_tag_reset(page);
+ set_bit(PG_mte_tagged, &page->flags);
+}
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 6d44c028d1c9..2aaf950b906c 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -14,28 +14,25 @@
#include <asm/cache.h>
#include <asm/tlbflush.h>
-void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(unsigned long start, unsigned long end)
{
- unsigned long addr = (unsigned long)kaddr;
-
if (icache_is_aliasing()) {
- __clean_dcache_area_pou(kaddr, len);
- __flush_icache_all();
+ dcache_clean_pou(start, end);
+ icache_inval_all_pou();
} else {
/*
* Don't issue kick_all_cpus_sync() after I-cache invalidation
* for user mappings.
*/
- __flush_icache_range(addr, addr + len);
+ caches_clean_inval_pou(start, end);
}
}
-static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len)
+static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
{
if (vma->vm_flags & VM_EXEC)
- sync_icache_aliases(kaddr, len);
+ sync_icache_aliases(start, end);
}
/*
@@ -48,7 +45,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long len)
{
memcpy(dst, src, len);
- flush_ptrace_access(vma, page, uaddr, dst, len);
+ flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
}
void __sync_icache_dcache(pte_t pte)
@@ -56,7 +53,9 @@ void __sync_icache_dcache(pte_t pte)
struct page *page = pte_page(pte);
if (!test_bit(PG_dcache_clean, &page->flags)) {
- sync_icache_aliases(page_address(page), page_size(page));
+ sync_icache_aliases((unsigned long)page_address(page),
+ (unsigned long)page_address(page) +
+ page_size(page));
set_bit(PG_dcache_clean, &page->flags);
}
}
@@ -77,20 +76,20 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(caches_clean_inval_pou);
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
{
/* Ensure order against any prior non-cacheable writes */
dmb(osh);
- __clean_dcache_area_pop(addr, size);
+ dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
void arch_invalidate_pmem(void *addr, size_t size)
{
- __inval_dcache_area(addr, size);
+ dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index e55409caaee3..6e1ca044ca90 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -499,6 +499,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif
+ /*
+ * Selected page table levels should match when derived from
+ * scratch using the virtual address range and page size.
+ */
+ BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
+ CONFIG_PGTABLE_LEVELS);
+
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
/*
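The new BUILD_BUG_ON() in mem_init() above cross-checks CONFIG_PGTABLE_LEVELS against the page-table geometry. Assuming the usual definition ARM64_HW_PGTABLE_LEVELS(va_bits) == ((va_bits) - 4) / (PAGE_SHIFT - 3), a couple of worked examples (configurations chosen purely for illustration):

    /*
     * 4K pages (PAGE_SHIFT = 12), CONFIG_ARM64_VA_BITS = 48:
     *     (48 - 4) / (12 - 3) = 44 / 9 = 4   ->  CONFIG_PGTABLE_LEVELS must be 4
     * 64K pages (PAGE_SHIFT = 16), CONFIG_ARM64_VA_BITS = 42:
     *     (42 - 4) / (16 - 3) = 38 / 13 = 2  ->  CONFIG_PGTABLE_LEVELS must be 2
     */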
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 89b66ef43a0f..0b28cc218091 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -228,7 +228,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+ if (((addr | next | phys) & ~PMD_MASK) == 0 &&
(flags & NO_BLOCK_MAPPINGS) == 0) {
pmd_set_huge(pmdp, phys, prot);
@@ -1114,14 +1114,14 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
}
#endif
-#if !ARM64_SWAPPER_USES_SECTION_MAPS
+#if !ARM64_KERNEL_USES_PMD_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
return vmemmap_populate_basepages(start, end, node, altmap);
}
-#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#else /* !ARM64_KERNEL_USES_PMD_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
@@ -1166,17 +1166,18 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return 0;
}
-#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#endif /* !ARM64_KERNEL_USES_PMD_MAPS */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap)
{
-#ifdef CONFIG_MEMORY_HOTPLUG
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
unmap_hotplug_range(start, end, true, altmap);
free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
-#endif
}
+#endif /* CONFIG_MEMORY_HOTPLUG */
static inline pud_t *fixmap_pud(unsigned long addr)
{
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 97d7bcd8d4f2..35936c5ae1ce 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,9 +46,13 @@
#endif
#ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
+#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
-#define TCR_KASAN_HW_FLAGS 0
+/*
+ * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
+ * TBI being enabled at EL1.
+ */
+#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#endif
/*
@@ -58,10 +62,8 @@
#define MAIR_EL1_SET \
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
- MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
- MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
#ifdef CONFIG_CPU_PM
@@ -83,11 +85,7 @@ SYM_FUNC_START(cpu_do_suspend)
mrs x9, mdscr_el1
mrs x10, oslsr_el1
mrs x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x12, tpidr_el1
-alternative_else
- mrs x12, tpidr_el2
-alternative_endif
+ get_this_cpu_offset x12
mrs x13, sp_el0
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
@@ -145,11 +143,7 @@ SYM_FUNC_START(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- msr tpidr_el1, x13
-alternative_else
- msr tpidr_el2, x13
-alternative_endif
+ set_this_cpu_offset x13
msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
@@ -464,7 +458,7 @@ SYM_FUNC_START(__cpu_setup)
msr_s SYS_TFSRE0_EL1, xzr
/* set the TCR_EL1 bits */
- mov_q x10, TCR_KASAN_HW_FLAGS
+ mov_q x10, TCR_MTE_FLAGS
orr tcr, tcr, x10
1:
#endif
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index a1937dfff31c..1c403536c9bb 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -159,10 +159,6 @@ static const struct prot_bits pte_bits[] = {
.set = "DEVICE/nGnRE",
}, {
.mask = PTE_ATTRINDX_MASK,
- .val = PTE_ATTRINDX(MT_DEVICE_GRE),
- .set = "DEVICE/GRE",
- }, {
- .mask = PTE_ATTRINDX_MASK,
.val = PTE_ATTRINDX(MT_NORMAL_NC),
.set = "MEM/NORMAL-NC",
}, {