author    Will Deacon <will.deacon@arm.com>    2018-02-15 11:14:56 +0000
committer Catalin Marinas <catalin.marinas@arm.com>    2018-02-16 18:13:57 +0000
commit    20a004e7b017cce282a46ac5d02c2b9c6b9bb1fa (patch)
tree      390bf8546ee581e54103e2bdc5876ac56cceefca /arch/arm64/mm/dump.c
parent    2ce77f6d8a9ae9ce6d80397d88bdceb84a2004cd (diff)
arm64: mm: Use READ_ONCE/WRITE_ONCE when accessing page tables
In many cases, page tables can be accessed concurrently by either another CPU (due to things like fast gup) or by the hardware page table walker itself, which may set access/dirty bits. In such cases, it is important to use READ_ONCE/WRITE_ONCE when accessing page table entries so that entries cannot be torn, merged or subject to apparent loss of coherence due to compiler transformations.

Whilst there are some scenarios where this cannot happen (e.g. pinned kernel mappings for the linear region), the overhead of using READ_ONCE/WRITE_ONCE everywhere is minimal and makes the code an awful lot easier to reason about.

This patch consistently uses these macros in the arch code, as well as explicitly namespacing pointers to page table entries from the entries themselves by adopting a 'p' suffix for the former (as is sometimes used elsewhere in the kernel source).

Tested-by: Yury Norov <ynorov@caviumnetworks.com>
Tested-by: Richard Ruigrok <rruigrok@codeaurora.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
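To illustrate the hazard the commit message describes, here is a minimal, self-contained userspace sketch, not kernel code: the toy pmd_t, the pmd_none()/pmd_val() helpers and the walk_racy()/walk_safe() functions are hypothetical, and READ_ONCE is reduced to a plain volatile cast for demonstration (the real macro in <linux/compiler.h> does more). Without READ_ONCE, the compiler is free to reload *pmdp between the pmd_none() check and the pmd_val() use, so the two accesses can observe different values if another CPU or the hardware walker updates the entry concurrently; taking one READ_ONCE snapshot makes every subsequent check and use operate on the same value.

/*
 * Minimal sketch of the snapshot pattern (userspace C, hypothetical
 * helpers; the real READ_ONCE in <linux/compiler.h> is more elaborate
 * than this volatile cast).
 */
#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))

typedef struct { uint64_t pmd; } pmd_t;         /* toy page-table entry */
#define pmd_val(e)      ((e).pmd)
#define pmd_none(e)     (pmd_val(e) == 0)

/* Racy: the compiler may emit two loads of *pmdp, so the check and the
 * use can see different values if the entry is updated concurrently. */
static void walk_racy(pmd_t *pmdp)
{
        if (!pmd_none(*pmdp))                           /* load #1 */
                printf("entry: %#llx\n",
                       (unsigned long long)pmd_val(*pmdp)); /* load #2 */
}

/* Safe: one READ_ONCE snapshot; all later checks and uses agree.
 * The volatile access cannot be repeated, merged or elided by the
 * compiler (and is a single untorn load for an aligned 64-bit entry
 * on arm64). */
static void walk_safe(pmd_t *pmdp)
{
        pmd_t pmd = READ_ONCE(*pmdp);

        if (!pmd_none(pmd))
                printf("entry: %#llx\n", (unsigned long long)pmd_val(pmd));
}

int main(void)
{
        pmd_t entry = { .pmd = 0x200000711ULL };        /* arbitrary toy value */

        walk_racy(&entry);
        walk_safe(&entry);
        return 0;
}

This is exactly the shape of the change in the diff below: each walker now takes one READ_ONCE snapshot of *pmdp/*pudp/*pgdp per loop iteration and passes the snapshot, rather than re-dereferencing the pointer, to the pmd_none()/pmd_sect()/pmd_bad() family of predicates.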
Diffstat (limited to 'arch/arm64/mm/dump.c')
-rw-r--r--    arch/arm64/mm/dump.c | 54
1 file changed, 30 insertions, 24 deletions
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 7b60d62ac593..65dfc8571bf8 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -286,48 +286,52 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 }
 
-static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
 {
-        pte_t *pte = pte_offset_kernel(pmd, 0UL);
+        pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
         unsigned long addr;
         unsigned i;
 
-        for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+        for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
                 addr = start + i * PAGE_SIZE;
-                note_page(st, addr, 4, pte_val(*pte));
+                note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
         }
 }
 
-static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
 {
-        pmd_t *pmd = pmd_offset(pud, 0UL);
+        pmd_t *pmdp = pmd_offset(pudp, 0UL);
         unsigned long addr;
         unsigned i;
 
-        for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+        for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+                pmd_t pmd = READ_ONCE(*pmdp);
+
                 addr = start + i * PMD_SIZE;
-                if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-                        note_page(st, addr, 3, pmd_val(*pmd));
+                if (pmd_none(pmd) || pmd_sect(pmd)) {
+                        note_page(st, addr, 3, pmd_val(pmd));
                 } else {
-                        BUG_ON(pmd_bad(*pmd));
-                        walk_pte(st, pmd, addr);
+                        BUG_ON(pmd_bad(pmd));
+                        walk_pte(st, pmdp, addr);
                 }
         }
 }
 
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
 {
-        pud_t *pud = pud_offset(pgd, 0UL);
+        pud_t *pudp = pud_offset(pgdp, 0UL);
         unsigned long addr;
         unsigned i;
 
-        for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+        for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
+                pud_t pud = READ_ONCE(*pudp);
+
                 addr = start + i * PUD_SIZE;
-                if (pud_none(*pud) || pud_sect(*pud)) {
-                        note_page(st, addr, 2, pud_val(*pud));
+                if (pud_none(pud) || pud_sect(pud)) {
+                        note_page(st, addr, 2, pud_val(pud));
                 } else {
-                        BUG_ON(pud_bad(*pud));
-                        walk_pmd(st, pud, addr);
+                        BUG_ON(pud_bad(pud));
+                        walk_pmd(st, pudp, addr);
                 }
         }
 }
@@ -335,17 +339,19 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
                 unsigned long start)
 {
-        pgd_t *pgd = pgd_offset(mm, 0UL);
+        pgd_t *pgdp = pgd_offset(mm, 0UL);
         unsigned i;
         unsigned long addr;
 
-        for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+        for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
+                pgd_t pgd = READ_ONCE(*pgdp);
+
                 addr = start + i * PGDIR_SIZE;
-                if (pgd_none(*pgd)) {
-                        note_page(st, addr, 1, pgd_val(*pgd));
+                if (pgd_none(pgd)) {
+                        note_page(st, addr, 1, pgd_val(pgd));
                 } else {
-                        BUG_ON(pgd_bad(*pgd));
-                        walk_pud(st, pgd, addr);
+                        BUG_ON(pgd_bad(pgd));
+                        walk_pud(st, pgdp, addr);
                 }
         }
 }