author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2015-12-01 09:06:50 +0530
committer  Michael Ellerman <mpe@ellerman.id.au>               2015-12-14 15:19:12 +1100
commit     c6a3c495f05a070d4c4016d4a51c384cba723971
tree       6d80845b8ff67e57533312085de88c5fb67c16d2  /arch/powerpc
parent     a43c0eb8364c022725df586e91dd753633374d66

powerpc/mm: Add helper for converting pte bits to hpte bits
Instead of open coding it in multiple code paths, export the helper
and add more documentation. Also make sure we don't make assumptions
about pte bit positions.
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
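
For context, the conversion this commit centralizes has roughly the shape
sketched below. This is a minimal userspace mock, not the kernel code:
convert_pte_flags() and the PTE_*/HPTE_* constants are illustrative
stand-ins invented for this sketch; the real _PAGE_* and HPTE_R_*
definitions live in the arch/powerpc headers and have different values.

#include <stdio.h>

/* Illustrative stand-ins only; not the kernel's real bit values. */
#define PTE_USER   0x001UL
#define PTE_RW     0x002UL
#define PTE_DIRTY  0x004UL
#define PTE_EXEC   0x008UL

#define HPTE_N     0x004UL  /* no-execute */
#define HPTE_M     0x010UL  /* memory coherence */
#define HPTE_C     0x080UL  /* changed */

/*
 * Mirror of the logic in htab_convert_pte_flags(): derive each HPTE
 * bit from the Linux pte flags explicitly, instead of masking on the
 * assumption that the two encodings share bit positions.
 */
static unsigned long convert_pte_flags(unsigned long pteflags)
{
        unsigned long rflags = 0;

        /* Linux EXEC is inverted relative to the HPTE N (no-execute) bit */
        if ((pteflags & PTE_EXEC) == 0)
                rflags |= HPTE_N;

        /* PP bits: kernel = 00, user RW = 0x2, user RO (or clean) = 0x3 */
        if (pteflags & PTE_USER) {
                rflags |= 0x2;
                if (!((pteflags & PTE_RW) && (pteflags & PTE_DIRTY)))
                        rflags |= 0x1;
        }

        /* Always set C and M, as the kernel helper does */
        return rflags | HPTE_C | HPTE_M;
}

int main(void)
{
        printf("kernel rwx    -> %#lx\n",
               convert_pte_flags(PTE_RW | PTE_DIRTY | PTE_EXEC));
        printf("user ro       -> %#lx\n", convert_pte_flags(PTE_USER));
        printf("user rw dirty -> %#lx\n",
               convert_pte_flags(PTE_USER | PTE_RW | PTE_DIRTY));
        return 0;
}

Compiled with plain cc, it prints the rflags each class of page would
receive under these mock constants.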
Diffstat (limited to 'arch/powerpc')

-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h  |  1
-rw-r--r--  arch/powerpc/mm/hash64_4k.c                | 13
-rw-r--r--  arch/powerpc/mm/hash64_64k.c               | 35
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c            | 22
-rw-r--r--  arch/powerpc/mm/hugepage-hash64.c          | 13
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c       |  4

6 files changed, 21 insertions(+), 67 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index e4ea9d73a541..9c212449b2e8 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -236,6 +236,7 @@ extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
 					 pmd_t *pmdp,
 					 unsigned long clr,
 					 unsigned long set);
+extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
 				       unsigned long addr,
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 3b49c6f18741..ee863137035a 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -53,18 +53,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
 	 * need to add in 0x1 if it's a read-only user page
 	 */
-	rflags = new_pte & _PAGE_USER;
-	if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
-					(new_pte & _PAGE_DIRTY)))
-		rflags |= 0x1;
-	/*
-	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-	 */
-	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
-	/*
-	 * Always add C and Memory coherence bit
-	 */
-	rflags |= HPTE_R_C | HPTE_R_M;
+	rflags = htab_convert_pte_flags(new_pte);
 	/*
 	 * Add in WIMG bits
 	 */
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index fc0898eb309d..b14280e9d850 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -85,22 +85,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * Handle the subpage protection bits
 	 */
 	subpg_pte = new_pte & ~subpg_prot;
-	/*
-	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
-	 * need to add in 0x1 if it's a read-only user page
-	 */
-	rflags = subpg_pte & _PAGE_USER;
-	if ((subpg_pte & _PAGE_USER) && !((subpg_pte & _PAGE_RW) &&
-					(subpg_pte & _PAGE_DIRTY)))
-		rflags |= 0x1;
-	/*
-	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-	 */
-	rflags |= ((subpg_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
-	/*
-	 * Always add C and Memory coherence bit
-	 */
-	rflags |= HPTE_R_C | HPTE_R_M;
+	rflags = htab_convert_pte_flags(subpg_pte);
 	/*
 	 * Add in WIMG bits
 	 */
@@ -271,22 +256,8 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		new_pte |= _PAGE_DIRTY;
 	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
 					  old_pte, new_pte));
-	/*
-	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
-	 * need to add in 0x1 if it's a read-only user page
-	 */
-	rflags = new_pte & _PAGE_USER;
-	if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
-					(new_pte & _PAGE_DIRTY)))
-		rflags |= 0x1;
-	/*
-	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-	 */
-	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
-	/*
-	 * Always add C and Memory coherence bit
-	 */
-	rflags |= HPTE_R_C | HPTE_R_M;
+
+	rflags = htab_convert_pte_flags(new_pte);
 	/*
 	 * Add in WIMG bits
 	 */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index cab2deb8d20b..6c67bd0bec55 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -159,20 +159,26 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
 	},
 };
 
-static unsigned long htab_convert_pte_flags(unsigned long pteflags)
+unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
-	unsigned long rflags = pteflags & 0x1fa;
+	unsigned long rflags = 0;
 
 	/* _PAGE_EXEC -> NOEXEC */
 	if ((pteflags & _PAGE_EXEC) == 0)
 		rflags |= HPTE_R_N;
-
-	/* PP bits. _PAGE_USER is already PP bit 0x2, so we only
-	 * need to add in 0x1 if it's a read-only user page
+	/*
+	 * PP bits:
+	 * Linux uses SLB key 0 for the kernel and 1 for user space.
+	 * Kernel areas are mapped with PP bits 00, and there is no
+	 * kernel RO (_PAGE_KERNEL_RO).
+	 * User areas are mapped with PP 0x2, and read-only user
+	 * areas with 0x3.
 	 */
-	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
-					 (pteflags & _PAGE_DIRTY)))
-		rflags |= 1;
+	if (pteflags & _PAGE_USER) {
+		rflags |= 0x2;
+		if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
+			rflags |= 0x1;
+	}
 	/*
 	 * Always add "C" bit for perf. Memory coherence is always enabled
 	 */
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 4d87122cf6a7..91fcac6f989d 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -54,18 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		new_pmd |= _PAGE_DIRTY;
 	} while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
 					  old_pmd, new_pmd));
-	/*
-	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
-	 * need to add in 0x1 if it's a read-only user page
-	 */
-	rflags = new_pmd & _PAGE_USER;
-	if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
-					(new_pmd & _PAGE_DIRTY)))
-		rflags |= 0x1;
-	/*
-	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-	 */
-	rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	rflags = htab_convert_pte_flags(new_pmd);
 
 #if 0
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 7584e8445512..304c8520506e 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -59,10 +59,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		new_pte |= _PAGE_DIRTY;
 	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
 					 old_pte, new_pte));
+	rflags = htab_convert_pte_flags(new_pte);
 
-	rflags = 0x2 | (!(new_pte & _PAGE_RW));
-	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
-	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
 	sz = ((1UL) << shift);
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 		/* No CPU has hugepages but lacks no execute, so we
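
A note on the design choice visible in the diff: the old static helper
began with rflags = pteflags & 0x1fa, a mask that only produced correct
HPTE bits because the Linux pte bits happened to sit at the same
positions as the corresponding HPTE r-bits. The exported helper instead
derives each bit (PP, N, C, M) explicitly from the pte flags, which is
what lets the commit message promise no assumptions about pte bit
positions. It also replaces the hugetlb path's hard-coded
rflags = 0x2 | (!(new_pte & _PAGE_RW)), so all of the hash fault paths
now share a single conversion.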