author		Helge Deller <deller@gmx.de>	2021-02-12 16:38:52 +0100
committer	Helge Deller <deller@gmx.de>	2021-02-12 16:39:42 +0100
commit		b7795074a04669d0a023babf786d29bf67c68783 (patch)
tree		5d4431b00e3416bc66f6fa1fc1a21cc530da4568 /arch/parisc/kernel
parent		ae3c4761c15d96999d1aab6c57aedc3beb7fa004 (diff)
parisc: Optimize per-pagetable spinlocks
On parisc, a spinlock which protects against parallel accesses to the pgd
is stored in the page directly behind the pgd. That is why one additional
page (PGD_ALLOC_ORDER) is allocated for the pgd.
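For context, the old layout looked roughly like this (an illustrative
sketch only, not the literal parisc code; pgd_spinlock() stands in for the
old helper that located the lock in the extra page behind the pgd):

	/* OLD scheme (sketch): pgd_alloc() grabs 2^PGD_ALLOC_ORDER pages;
	 * the spinlock lives in the extra page behind the pgd itself. */
	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
	if (pgd)
		spin_lock_init(pgd_spinlock(pgd));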
Matthew Wilcox suggested that we should instead use a pointer in the page
table's struct page for this spinlock, and he noted that the comments for
the PGD_ORDER and PMD_ORDER defines were wrong.
Both suggestions are addressed with this patch. Instead of having its own
spinlock to protect the pgd, we now switch to using the existing
page_table_lock. Additionally, besides loading the pgd into cr25 in
switch_mm_irqs_off(), the physical address of this lock is loaded into
cr28 (tr4), so that we can avoid implementing a complicated assembly
lookup for this lock in the TLB fault handlers.
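The context-switch side is small. As a sketch (assuming the parisc mtctl()
helper that writes a control register; the in-tree switch_mm_irqs_off()
additionally hands over an ldcw-aligned word inside the spinlock, which
this sketch glosses over):

	if (prev != next) {
	#ifdef CONFIG_TLB_PTLOCK
		/* physical address of the mm's page_table_lock -> cr28 (tr4),
		 * so the TLB fault handlers can find it without a lookup */
		mtctl(__pa(&next->page_table_lock), 28);
	#endif
		/* physical address of the pgd -> cr25, as before */
		mtctl(__pa(next->pgd), 25);
	}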
The existing Hybrid L2/L3 page table scheme (where the pmd is adjacent
to the pgd) has been dropped with this patch.
Remove the locking in set_pte() and the huge-page pte functions too. It
triggers a spinlock recursion on 32-bit machines and seems unnecessary.
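With that locking gone, set_pte() collapses to a plain store (sketch; the
real define may differ in details such as the choice of barrier):

	#define set_pte(pteptr, pteval)			\
		do {					\
			*(pteptr) = (pteval);		\
			barrier();			\
		} while (0)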
Suggested-by: Matthew Wilcox <willy@infradead.org>
Fixes: b37d1c1898b2 ("parisc: Use per-pagetable spinlock")
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--	arch/parisc/kernel/asm-offsets.c	1
-rw-r--r--	arch/parisc/kernel/entry.S		116
2 files changed, 51 insertions(+), 66 deletions(-)
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 305768a40773..cd2cc1b1648c 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -268,7 +268,6 @@ int main(void)
 	DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
 	DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
 	DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
-	DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
 	DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
 	DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
 	DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 4d37cc9cba37..9f939afe6b88 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -35,10 +35,9 @@
 	.level 2.0
 #endif
 
-	.import		pa_tlb_lock,data
-	.macro	load_pa_tlb_lock reg
-	mfctl		%cr25,\reg
-	addil		L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
+	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
+	.macro	get_ptl reg
+	mfctl	%cr28,\reg
 	.endm
 
 	/* space_to_prot macro creates a prot id from a space id */
@@ -407,7 +406,9 @@
 # endif
 #endif
 	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+#if CONFIG_PGTABLE_LEVELS < 3
 	copy		%r0,\pte
+#endif
 	ldw,s		\index(\pmd),\pmd
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
 	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
@@ -417,38 +418,23 @@
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
 	.endm
 
-	/* Look up PTE in a 3-Level scheme.
-	 *
-	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
-	 * first pmd adjacent to the pgd.  This means that we can
-	 * subtract a constant offset to get to it.  The pmd and pgd
-	 * sizes are arranged so that a single pmd covers 4GB (giving
-	 * a full LP64 process access to 8TB) so our lookups are
-	 * effectively L2 for the first 4GB of the kernel (i.e. for
-	 * all ILP32 processes and all the kernel for machines with
-	 * under 4GB of memory) */
+	/* Look up PTE in a 3-Level scheme. */
 	.macro		L3_ptep pgd,pte,index,va,fault
-#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
+#if CONFIG_PGTABLE_LEVELS == 3
+	copy		%r0,\pte
 	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	ldw,s		\index(\pgd),\pgd
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-	shld		\pgd,PxD_VALUE_SHIFT,\index
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-	copy		\index,\pgd
-	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
+	shld		\pgd,PxD_VALUE_SHIFT,\pgd
 #endif
 	L2_ptep		\pgd,\pte,\index,\va,\fault
 	.endm
 
-	/* Acquire pa_tlb_lock lock and check page is present. */
-	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
-#ifdef CONFIG_SMP
+	/* Acquire page_table_lock and check page is present. */
+	.macro		ptl_lock	spc,ptp,pte,tmp,tmp1,fault
+#ifdef CONFIG_TLB_PTLOCK
 98:	cmpib,COND(=),n	0,\spc,2f
-	load_pa_tlb_lock \tmp
+	get_ptl		\tmp
 1:	LDCW		0(\tmp),\tmp1
 	cmpib,COND(=)	0,\tmp1,1b
 	nop
@@ -463,26 +449,26 @@
 3:
 	.endm
 
-	/* Release pa_tlb_lock lock without reloading lock address.
+	/* Release page_table_lock without reloading lock address.
 	   Note that the values in the register spc are limited to
 	   NR_SPACE_IDS (262144). Thus, the stw instruction always
 	   stores a nonzero value even when register spc is 64 bits.
 	   We use an ordered store to ensure all prior accesses are
 	   performed prior to releasing the lock. */
-	.macro		tlb_unlock0	spc,tmp
-#ifdef CONFIG_SMP
+	.macro		ptl_unlock0	spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
 98:	or,COND(=)	%r0,\spc,%r0
 	stw,ma		\spc,0(\tmp)
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	.endm
 
-	/* Release pa_tlb_lock lock. */
-	.macro		tlb_unlock1	spc,tmp
-#ifdef CONFIG_SMP
-98:	load_pa_tlb_lock \tmp
+	/* Release page_table_lock. */
+	.macro		ptl_unlock1	spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
+98:	get_ptl		\tmp
+	ptl_unlock0	\spc,\tmp
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-	tlb_unlock0	\spc,\tmp
 #endif
 	.endm
 
@@ -1165,14 +1151,14 @@ dtlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
 
-	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1191,14 +1177,14 @@ nadtlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
 
-	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1219,7 +1205,7 @@ dtlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11
 
-	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
+	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1232,7 +1218,7 @@ dtlb_miss_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1252,7 +1238,7 @@ nadtlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
 
-	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1265,7 +1251,7 @@ nadtlb_miss_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1285,7 +1271,7 @@ dtlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20
 
-	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
+	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1294,7 +1280,7 @@ dtlb_miss_20:
 
 	idtlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1313,7 +1299,7 @@ nadtlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
 
-	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1322,7 +1308,7 @@ nadtlb_miss_20:
 
 	idtlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1422,14 +1408,14 @@ itlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,itlb_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1446,14 +1432,14 @@ naitlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
 
-	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1474,7 +1460,7 @@ itlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,itlb_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1487,7 +1473,7 @@ itlb_miss_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1498,7 +1484,7 @@ naitlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
 
-	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
+	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1511,7 +1497,7 @@ naitlb_miss_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1532,7 +1518,7 @@ itlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,itlb_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1541,7 +1527,7 @@ itlb_miss_20:
 
 	iitlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1552,7 +1538,7 @@ naitlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
 
-	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
+	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1561,7 +1547,7 @@ naitlb_miss_20:
 
 	iitlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1584,14 +1570,14 @@ dbit_trap_20w:
 
 	L3_ptep		ptp,pte,t0,va,dbit_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt		pte,prot
 
-	tlb_unlock0	spc,t0
+	ptl_unlock0	spc,t0
 	rfir
 	nop
 #else
@@ -1604,7 +1590,7 @@ dbit_trap_11:
 
 	L2_ptep		ptp,pte,t0,va,dbit_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1617,7 +1603,7 @@ dbit_trap_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock0	spc,t0
+	ptl_unlock0	spc,t0
 	rfir
 	nop
 
@@ -1628,7 +1614,7 @@ dbit_trap_20:
 
 	L2_ptep		ptp,pte,t0,va,dbit_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1637,7 +1623,7 @@ dbit_trap_20:
 
 	idtlbt		pte,prot
 
-	tlb_unlock0	spc,t0
+	ptl_unlock0	spc,t0
 	rfir
 	nop
 #endif
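For readers not fluent in PA-RISC assembly, the new ptl_lock/ptl_unlock0
macros above behave roughly like the following C sketch (the _c names are
hypothetical; __ldcw() is the parisc load-and-clear-word primitive, and
spc is the space id, which is zero for kernel faults and nonzero for user
contexts):

	static void ptl_lock_c(unsigned int spc, volatile unsigned int *lock)
	{
		if (spc == 0)			/* kernel space: no locking */
			return;
		while (__ldcw(lock) == 0)	/* atomically load and clear; */
			continue;		/* zero means the lock is held */
	}

	static void ptl_unlock_c(unsigned int spc, volatile unsigned int *lock)
	{
		if (spc)		/* release by storing the nonzero space id; */
			*lock = spc;	/* an ordered store (stw,ma) in the real code */
	}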