From 1a01dc87e09b6e60d22c417e00f470a72f00ec80 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 26 Jul 2016 20:09:30 +1000 Subject: powerpc/mm: Add mmu_early_init_devtree() Empty for now, but we'll add to it in the next patch. Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu.h | 1 + arch/powerpc/include/asm/mmu.h | 1 + arch/powerpc/kernel/prom.c | 2 ++ arch/powerpc/mm/init_64.c | 6 ++++++ 4 files changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index d4eda6420523..4eb4bd019716 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -107,6 +107,7 @@ extern int mmu_vmemmap_psize; extern int mmu_io_psize; /* MMU initialization */ +void mmu_early_init_devtree(void); extern void radix_init_native(void); extern void hash__early_init_mmu(void); extern void radix__early_init_mmu(void); diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 54471228f7b8..14220c5c12c9 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -210,6 +210,7 @@ extern void early_init_mmu(void); extern void early_init_mmu_secondary(void); extern void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size); +static inline void mmu_early_init_devtree(void) { } #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index bae3db791150..9686984e79c4 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -750,6 +750,8 @@ void __init early_init_devtree(void *params) if (disable_radix) cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX; + mmu_early_init_devtree(); + #ifdef CONFIG_PPC_POWERNV /* Scan and build the list of machine check recoverable ranges */ of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 33709bdb0419..d0fb33ac3db2 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -411,3 +411,9 @@ struct page *realmode_pfn_to_page(unsigned long pfn) EXPORT_SYMBOL_GPL(realmode_pfn_to_page); #endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */ + +#ifdef CONFIG_PPC_STD_MMU_64 +void __init mmu_early_init_devtree(void) +{ +} +#endif /* CONFIG_PPC_STD_MMU_64 */ -- cgit v1.2.3 From c610ec60ed6354157ea7b0c9c9a7236126ef416b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 26 Jul 2016 21:29:30 +1000 Subject: powerpc/mm: Move disable_radix handling into mmu_early_init_devtree() Move the handling of the disable_radix command line argument into the newly created mmu_early_init_devtree(). It's an MMU option so it's preferable to have it in an mm related file, and it also means platforms that don't support radix don't have to carry the code. 
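As a minimal sketch of the mechanism being moved: early_param() registers a callback that runs while the kernel parses its command line, long before initcalls. The "disable_foo" option name below is made up purely for illustration:

	static bool disable_foo;

	static int __init parse_disable_foo(char *p)
	{
		/* runs during early command line parsing */
		disable_foo = true;
		return 0;
	}
	early_param("disable_foo", parse_disable_foo);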
Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/prom.c | 13 ------------- arch/powerpc/mm/init_64.c | 11 +++++++++++ 2 files changed, 11 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 9686984e79c4..b4b6952e8991 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -647,14 +647,6 @@ static void __init early_reserve_mem(void) #endif } -static bool disable_radix; -static int __init parse_disable_radix(char *p) -{ - disable_radix = true; - return 0; -} -early_param("disable_radix", parse_disable_radix); - void __init early_init_devtree(void *params) { phys_addr_t limit; @@ -744,11 +736,6 @@ void __init early_init_devtree(void *params) */ spinning_secondaries = boot_cpu_count - 1; #endif - /* - * now fixup radix MMU mode based on kernel command line - */ - if (disable_radix) - cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX; mmu_early_init_devtree(); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index d0fb33ac3db2..0d51e6e25db5 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -413,7 +413,18 @@ EXPORT_SYMBOL_GPL(realmode_pfn_to_page); #endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */ #ifdef CONFIG_PPC_STD_MMU_64 +static bool disable_radix; +static int __init parse_disable_radix(char *p) +{ + disable_radix = true; + return 0; +} +early_param("disable_radix", parse_disable_radix); + void __init mmu_early_init_devtree(void) { + /* Disable radix mode based on kernel command line. */ + if (disable_radix) + cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX; } #endif /* CONFIG_PPC_STD_MMU_64 */ -- cgit v1.2.3 From bacf9cf88303c0df5794ca45dd9f297740a00913 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 26 Jul 2016 21:31:59 +1000 Subject: powerpc/mm: Do hash device tree scanning earlier Currently MMU initialisation (early_init_mmu()) consists of a mixture of scanning the device tree, setting MMU feature bits, and then also doing actual initialisation of MMU data structures. We'd like to decouple the setting of the MMU features from the actual setup. So split out the device tree scanning, and associated code, and call it from mmu_early_init_devtree(). 
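For context, a sketch of the shape of those scanning routines: of_scan_flat_dt() walks every node of the flattened device tree and invokes the callback, and a non-zero return stops the walk. The "ibm,example" property name below is illustrative, not one of the real properties:

	static int __init example_dt_scan(unsigned long node, const char *uname,
					  int depth, void *data)
	{
		const __be32 *prop;

		prop = of_get_flat_dt_prop(node, "ibm,example", NULL);
		if (prop == NULL)
			return 0;	/* not here, keep scanning */

		/* record what we found, e.g. fill in mmu_psize_defs[] */
		return 1;		/* done, stop the walk */
	}

	/* typically invoked as: of_scan_flat_dt(example_dt_scan, NULL); */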
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu.h | 1 + arch/powerpc/mm/hash_utils_64.c | 53 ++++++++++++++++---------------- arch/powerpc/mm/init_64.c | 3 ++ 3 files changed, 31 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 4eb4bd019716..358f1410dc0d 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -108,6 +108,7 @@ extern int mmu_io_psize; /* MMU initialization */ void mmu_early_init_devtree(void); +void hash__early_init_devtree(void); extern void radix_init_native(void); extern void hash__early_init_mmu(void); extern void radix__early_init_mmu(void); diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index b78b5d211278..1a96b284b1a6 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -363,11 +363,6 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node, return 0; } -static void __init htab_init_seg_sizes(void) -{ - of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL); -} - static int __init get_idx_from_shift(unsigned int shift) { int idx = -1; @@ -539,7 +534,7 @@ static bool might_have_hea(void) #endif /* #ifdef CONFIG_PPC_64K_PAGES */ -static void __init htab_init_page_sizes(void) +static void __init htab_scan_page_sizes(void) { int rc; @@ -554,17 +549,23 @@ static void __init htab_init_page_sizes(void) * Try to find the available page sizes in the device-tree */ rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL); - if (rc != 0) /* Found */ - goto found; - - /* - * Not in the device-tree, let's fallback on known size - * list for 16M capable GP & GR - */ - if (mmu_has_feature(MMU_FTR_16M_PAGE)) + if (rc == 0 && mmu_has_feature(MMU_FTR_16M_PAGE)) { + /* + * Nothing in the device-tree, but the CPU supports 16M pages, + * so let's fallback on a known size list for 16M capable CPUs. + */ memcpy(mmu_psize_defs, mmu_psize_defaults_gp, sizeof(mmu_psize_defaults_gp)); -found: + } + +#ifdef CONFIG_HUGETLB_PAGE + /* Reserve 16G huge page memory sections for huge pages */ + of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); +#endif /* CONFIG_HUGETLB_PAGE */ +} + +static void __init htab_init_page_sizes(void) +{ if (!debug_pagealloc_enabled()) { /* * Pick a size for the linear mapping. 
Currently, we only @@ -630,11 +631,6 @@ found: ,mmu_psize_defs[mmu_vmemmap_psize].shift #endif ); - -#ifdef CONFIG_HUGETLB_PAGE - /* Reserve 16G huge page memory sections for huge pages */ - of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); -#endif /* CONFIG_HUGETLB_PAGE */ } static int __init htab_dt_scan_pftsize(unsigned long node, @@ -759,12 +755,6 @@ static void __init htab_initialize(void) DBG(" -> htab_initialize()\n"); - /* Initialize segment sizes */ - htab_init_seg_sizes(); - - /* Initialize page sizes */ - htab_init_page_sizes(); - if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) { mmu_kernel_ssize = MMU_SEGSIZE_1T; mmu_highuser_ssize = MMU_SEGSIZE_1T; @@ -885,8 +875,19 @@ static void __init htab_initialize(void) #undef KB #undef MB +void __init hash__early_init_devtree(void) +{ + /* Initialize segment sizes */ + of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL); + + /* Initialize page sizes */ + htab_scan_page_sizes(); +} + void __init hash__early_init_mmu(void) { + htab_init_page_sizes(); + /* * initialize page table size */ diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 0d51e6e25db5..d023333c6c9a 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -426,5 +426,8 @@ void __init mmu_early_init_devtree(void) /* Disable radix mode based on kernel command line. */ if (disable_radix) cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX; + + if (!radix_enabled()) + hash__early_init_devtree(); } #endif /* CONFIG_PPC_STD_MMU_64 */ -- cgit v1.2.3 From 2537b09c939fca26447f0ac3e688691de06eadda Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 26 Jul 2016 21:55:27 +1000 Subject: powerpc/mm: Do radix device tree scanning earlier Like we just did for hash, split the device tree scanning parts out and call them from mmu_early_init_devtree(). 
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu.h | 1 + arch/powerpc/mm/init_64.c | 4 +++- arch/powerpc/mm/pgtable-radix.c | 3 +-- 3 files changed, 5 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 358f1410dc0d..9ee00c2576d0 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -109,6 +109,7 @@ extern int mmu_io_psize; /* MMU initialization */ void mmu_early_init_devtree(void); void hash__early_init_devtree(void); +void radix__early_init_devtree(void); extern void radix_init_native(void); extern void hash__early_init_mmu(void); extern void radix__early_init_mmu(void); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index d023333c6c9a..e0ab33d20a10 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -427,7 +427,9 @@ void __init mmu_early_init_devtree(void) if (disable_radix) cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX; - if (!radix_enabled()) + if (radix_enabled()) + radix__early_init_devtree(); + else hash__early_init_devtree(); } #endif /* CONFIG_PPC_STD_MMU_64 */ diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 003ff48a11b6..f34ccdbe0fbd 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -264,7 +264,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node, return 1; } -static void __init radix_init_page_sizes(void) +void __init radix__early_init_devtree(void) { int rc; @@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void) __pte_frag_nr = H_PTE_FRAG_NR; __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; - radix_init_page_sizes(); if (!firmware_has_feature(FW_FEATURE_LPAR)) { radix_init_native(); lpcr = mfspr(SPRN_LPCR); -- cgit v1.2.3 From 9e8066f398396d26010d9c6c3c2538ff25461ef8 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 26 Jul 2016 21:55:48 +1000 Subject: powerpc/64: Do feature patching before MMU init Up until now we needed to do the MMU init before feature patching, because part of the MMU init was scanning the device tree and setting and/or clearing some MMU feature bits. Now that we have split that MMU feature modification out into routines called from early_init_devtree() (called earlier) we can now do feature patching before calling MMU init. The advantage of this is that the remainder of the MMU init runs with the final set of features, which will apply for the rest of the life of the system. This means we don't have to special case anything called from MMU init to deal with a changing set of feature bits. 
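In outline, the resulting early boot ordering is roughly the following (a sketch of the sequencing, not literal code):

	early_init_devtree(params);	/* DT scan; may set/clear MMU feature bits */
	/* ... */
	apply_feature_fixups();		/* one-time binary patching of kernel text */
	early_init_mmu();		/* runs with the final, frozen feature set */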
Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/setup_64.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index d8216aed22b7..984696136f96 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -298,12 +298,12 @@ void __init early_setup(unsigned long dt_ptr) */ configure_exceptions(); - /* Initialize the hash table or TLB handling */ - early_init_mmu(); - /* Apply all the dynamic patching */ apply_feature_fixups(); + /* Initialize the hash table or TLB handling */ + early_init_mmu(); + /* * At this point, we can let interrupts switch to virtual mode * (the MMU has been setup), so adjust the MSR in the PACA to -- cgit v1.2.3 From a28e46f109c9637b2539b9995078d5df4f7f6c09 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 26 Jul 2016 22:29:18 +1000 Subject: powerpc/kernel: Check features don't change after patching Early in boot we binary patch some sections of code based on the CPU and MMU feature bits. But it is a one-time patching; there is no facility for repatching the code later if the set of features changes. It is a major bug if the set of features changes after we've done the code patching, so add a check for it. Signed-off-by: Michael Ellerman --- arch/powerpc/lib/feature-fixups.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index defb2998b818..59c7caee05b2 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -152,10 +152,19 @@ static void do_final_fixups(void) #endif } -void apply_feature_fixups(void) +static unsigned long __initdata saved_cpu_features; +static unsigned int __initdata saved_mmu_features; +#ifdef CONFIG_PPC64 +static unsigned long __initdata saved_firmware_features; +#endif + +void __init apply_feature_fixups(void) { struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec); + *PTRRELOC(&saved_cpu_features) = spec->cpu_features; + *PTRRELOC(&saved_mmu_features) = spec->mmu_features; + /* * Apply the CPU-specific and firmware specific fixups to kernel text * (nop out sections not relevant to this CPU or this firmware). @@ -173,12 +182,28 @@ void apply_feature_fixups(void) PTRRELOC(&__stop___lwsync_fixup)); #ifdef CONFIG_PPC64 + saved_firmware_features = powerpc_firmware_features; do_feature_fixups(powerpc_firmware_features, &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); #endif do_final_fixups(); } +static int __init check_features(void) +{ + WARN(saved_cpu_features != cur_cpu_spec->cpu_features, + "CPU features changed after feature patching!\n"); + WARN(saved_mmu_features != cur_cpu_spec->mmu_features, + "MMU features changed after feature patching!\n"); +#ifdef CONFIG_PPC64 + WARN(saved_firmware_features != powerpc_firmware_features, + "Firmware features changed after feature patching!\n"); +#endif + + return 0; +} +late_initcall(check_features); + #ifdef CONFIG_FTR_FIXUP_SELFTEST #define check(x) \ -- cgit v1.2.3 From 5a25b6f527f9f5bbf5747b1b97e538e6d61bd2f2 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 27 Jul 2016 13:19:01 +1000 Subject: powerpc/mm: Make MMU_FTR_RADIX a MMU family feature MMU feature bits are defined such that we use the lower half to represent MMU family features. Remove the strict half-and-half split and also move Radix to an MMU family feature. 
Radix introduces a new MMU model and, strictly speaking, it is a new MMU family. This also frees up bits which can be used for individual features later. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu.h | 2 +- arch/powerpc/include/asm/mmu.h | 15 +++++++-------- arch/powerpc/kernel/entry_64.S | 2 +- arch/powerpc/kernel/exceptions-64s.S | 8 ++++---- arch/powerpc/kernel/idle_book3s.S | 2 +- arch/powerpc/kernel/prom.c | 2 +- arch/powerpc/mm/init_64.c | 2 +- 7 files changed, 16 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 9ee00c2576d0..ad2d501cddcf 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -24,7 +24,7 @@ struct mmu_psize_def { extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; #ifdef CONFIG_PPC_RADIX_MMU -#define radix_enabled() mmu_has_feature(MMU_FTR_RADIX) +#define radix_enabled() mmu_has_feature(MMU_FTR_TYPE_RADIX) #else #define radix_enabled() (0) #endif diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 14220c5c12c9..599781e48552 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -12,7 +12,7 @@ */ /* - * First half is MMU families + * MMU families */ #define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001) #define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002) @@ -21,9 +21,13 @@ #define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010) #define MMU_FTR_TYPE_47x ASM_CONST(0x00000020) +/* Radix page table supported and enabled */ +#define MMU_FTR_TYPE_RADIX ASM_CONST(0x00000040) + /* - * This is individual features + * Individual features below. */ + /* * We need to clear top 16bits of va (from the remaining 64 bits )in * tlbie* instructions @@ -93,11 +97,6 @@ */ #define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000) -/* - * Radix page table available - */ -#define MMU_FTR_RADIX ASM_CONST(0x80000000) - /* MMU feature bit sets for various CPUs */ #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \ MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2 @@ -131,7 +130,7 @@ enum { MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE | MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA | #ifdef CONFIG_PPC_RADIX_MMU - MMU_FTR_RADIX | + MMU_FTR_TYPE_RADIX | #endif 0, }; diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index fcb2887f5a33..6b8bc0dd09d4 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -532,7 +532,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) #ifdef CONFIG_PPC_STD_MMU_64 BEGIN_MMU_FTR_SECTION b 2f -END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) BEGIN_FTR_SECTION clrrdi r6,r8,28 /* get its ESID */ clrrdi r9,r1,28 /* get current sp ESID */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 6200e4925d26..334c7fac7a4a 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -938,7 +938,7 @@ BEGIN_MMU_FTR_SECTION b do_hash_page /* Try to handle as hpte fault */ MMU_FTR_SECTION_ELSE b handle_page_fault -ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX) +ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) .align 7 .globl h_data_storage_common @@ -969,7 +969,7 @@ BEGIN_MMU_FTR_SECTION b do_hash_page /* Try to handle as hpte fault */ MMU_FTR_SECTION_ELSE b handle_page_fault -ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX) +ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 
STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception) @@ -1390,7 +1390,7 @@ slb_miss_realmode: #ifdef CONFIG_PPC_STD_MMU_64 BEGIN_MMU_FTR_SECTION bl slb_allocate_realmode -END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX) +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) #endif /* All done -- return from exception. */ @@ -1404,7 +1404,7 @@ BEGIN_MMU_FTR_SECTION beq- 2f FTR_SECTION_ELSE b 2f -ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX) +ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) .machine push .machine "power4" diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 335eb6cedae5..4c2222cffbbc 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -570,7 +570,7 @@ common_exit: BEGIN_MMU_FTR_SECTION b no_segments -END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) /* Restore SLB from PACA */ ld r8,PACA_SLBSHADOWPTR(r13) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index b4b6952e8991..b0245bed6f54 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -170,7 +170,7 @@ static struct ibm_pa_feature { */ {CPU_FTR_TM_COMP, 0, 0, PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0}, - {0, MMU_FTR_RADIX, 0, 0, 40, 0, 0}, + {0, MMU_FTR_TYPE_RADIX, 0, 0, 40, 0, 0}, }; static void __init scan_features(unsigned long node, const unsigned char *ftrs, diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index e0ab33d20a10..6259f5db525b 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -425,7 +425,7 @@ void __init mmu_early_init_devtree(void) { /* Disable radix mode based on kernel command line. */ if (disable_radix) - cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX; + cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; if (radix_enabled()) radix__early_init_devtree(); -- cgit v1.2.3 From a81dc9d9957354ee2ebc2e387cbc10cf457b9948 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 27 Jul 2016 13:39:42 +1000 Subject: powerpc/kernel: Convert mmu_has_feature() to returning bool The intention is that the result is only used as a boolean, so enforce that by changing the return type to bool. Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/mmu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 599781e48552..eb942a446969 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -135,9 +135,9 @@ enum { 0, }; -static inline int mmu_has_feature(unsigned long feature) +static inline bool mmu_has_feature(unsigned long feature) { - return (MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature); + return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature); } static inline void mmu_clear_feature(unsigned long feature) -- cgit v1.2.3 From 6574ba950bbe9ab2460f8143018d93d15cacf5be Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 27 Jul 2016 13:35:15 +1000 Subject: powerpc/kernel: Convert cpu_has_feature() to returning bool The intention is that the result is only used as a boolean, so enforce that by changing the return type to bool. 
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index df4fb5faba43..7bb87017d9db 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -2,6 +2,7 @@ #define __ASM_POWERPC_CPUTABLE_H +#include <linux/types.h> #include <asm/asm-compat.h> #include <asm/feature-fixups.h> #include <uapi/asm/cputable.h> @@ -576,12 +577,10 @@ enum { }; #endif /* __powerpc64__ */ -static inline int cpu_has_feature(unsigned long feature) +static inline bool cpu_has_feature(unsigned long feature) { - return (CPU_FTRS_ALWAYS & feature) || - (CPU_FTRS_POSSIBLE - & cur_cpu_spec->cpu_features - & feature); + return !!((CPU_FTRS_ALWAYS & feature) || + (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); } #define HBP_NUM 1 -- cgit v1.2.3 From bab4c8de6289b4615c21ddf0400397d03ce1863c Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 27 Jul 2016 13:37:58 +1000 Subject: powerpc/mm: Define radix_enabled() in one place & use static inline Currently we have radix_enabled() defined three times: twice in asm/book3s/64/mmu.h and then a fallback in asm/mmu.h. Consolidate them in asm/mmu.h. While we're at it, convert them to static inlines, and change the fallback case to returning a bool, like mmu_has_feature(). Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu.h | 7 ------- arch/powerpc/include/asm/mmu.h | 16 ++++++++++++---- 2 files changed, 12 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index ad2d501cddcf..70c995870297 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -23,13 +23,6 @@ struct mmu_psize_def { }; extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; -#ifdef CONFIG_PPC_RADIX_MMU -#define radix_enabled() mmu_has_feature(MMU_FTR_TYPE_RADIX) -#else -#define radix_enabled() (0) -#endif - - #endif /* __ASSEMBLY__ */ /* 64-bit classic hash table MMU */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 599781e48552..f413b3213a3b 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -163,6 +163,18 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr) } #endif /* !CONFIG_DEBUG_VM */ +#ifdef CONFIG_PPC_RADIX_MMU +static inline bool radix_enabled(void) +{ + return mmu_has_feature(MMU_FTR_TYPE_RADIX); +} +#else +static inline bool radix_enabled(void) +{ + return false; +} +#endif + #endif /* !__ASSEMBLY__ */ /* The kernel use the constants below to index in the page sizes array. @@ -230,9 +242,5 @@ static inline void mmu_early_init_devtree(void) { } # include #endif -#ifndef radix_enabled -#define radix_enabled() (0) -#endif - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_MMU_H_ */ -- cgit v1.2.3 From a141cca3892bb391d17a73dae917ad51d40ff69a Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 27 Jul 2016 20:48:36 +1000 Subject: powerpc/mm: Add early_[cpu|mmu]_has_feature() In later patches, we will be switching CPU and MMU feature checks to use static keys. For checks in early boot before jump label is initialized, we need a variant of [cpu|mmu]_has_feature() that doesn't use jump labels. So create those called, unimaginatively, early_[cpu|mmu]_has_feature(). 
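Illustrative usage of the two variants (the helpers on the right-hand side are hypothetical):

	/* very early boot, before the static keys are initialised: */
	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		setup_hv_mode();	/* hypothetical */

	/* any time after feature fixups / jump_label_init(): */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		do_hv_work();		/* hypothetical */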
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 7 ++++++- arch/powerpc/include/asm/mmu.h | 17 ++++++++++++++++- 2 files changed, 22 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 7bb87017d9db..3d8dc9a7831d 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -577,12 +577,17 @@ enum { }; #endif /* __powerpc64__ */ -static inline bool cpu_has_feature(unsigned long feature) +static inline bool early_cpu_has_feature(unsigned long feature) { return !!((CPU_FTRS_ALWAYS & feature) || (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); } +static inline bool cpu_has_feature(unsigned long feature) +{ + return early_cpu_has_feature(feature); +} + #define HBP_NUM 1 #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index f413b3213a3b..08b4c06604d6 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -135,11 +135,16 @@ enum { 0, }; -static inline bool mmu_has_feature(unsigned long feature) +static inline bool early_mmu_has_feature(unsigned long feature) { return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature); } +static inline bool mmu_has_feature(unsigned long feature) +{ + return early_mmu_has_feature(feature); +} + static inline void mmu_clear_feature(unsigned long feature) { cur_cpu_spec->mmu_features &= ~feature; @@ -168,11 +173,21 @@ static inline bool radix_enabled(void) { return mmu_has_feature(MMU_FTR_TYPE_RADIX); } + +static inline bool early_radix_enabled(void) +{ + return early_mmu_has_feature(MMU_FTR_TYPE_RADIX); +} #else static inline bool radix_enabled(void) { return false; } + +static inline bool early_radix_enabled(void) +{ + return false; +} #endif #endif /* !__ASSEMBLY__ */ -- cgit v1.2.3 From b8f1b4f8606b40b478072fb551f79e2984d44fad Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sat, 23 Jul 2016 14:42:35 +0530 Subject: powerpc/mm: Convert early cpu/mmu feature check to use the new helpers This switches early feature checks to use the non-static-key variant of the function. In later patches we will be switching cpu_has_feature() and mmu_has_feature() to use static keys, and we can use them only after static key/jump label is initialized. Any feature check before jump label init should be done using these new helpers. 
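Why the distinction matters, in a hedged sketch: once cpu_has_feature() is backed by a static key, calling it before jump_label_init() reads the key's compile-time default rather than the real feature bits (CPU_FTR_EXAMPLE below is made up):

	/* may take the wrong branch before jump_label_init(): */
	if (cpu_has_feature(CPU_FTR_EXAMPLE))
		use_fast_path();

	/* always consults cur_cpu_spec->cpu_features directly: */
	if (early_cpu_has_feature(CPU_FTR_EXAMPLE))
		use_fast_path();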
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu.h | 2 +- arch/powerpc/kernel/paca.c | 2 +- arch/powerpc/kernel/setup_64.c | 4 ++-- arch/powerpc/mm/hash_utils_64.c | 2 +- arch/powerpc/mm/init_64.c | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 70c995870297..0cbde6a6750b 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -128,7 +128,7 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { - if (radix_enabled()) + if (early_radix_enabled()) return radix__setup_initial_memory_limit(first_memblock_base, first_memblock_size); return hash__setup_initial_memory_limit(first_memblock_base, diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 93dae296b6be..fa20060ff7a5 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca) * if we do a GET_PACA() before the feature fixups have been * applied */ - if (cpu_has_feature(CPU_FTR_HVMODE)) + if (early_cpu_has_feature(CPU_FTR_HVMODE)) mtspr(SPRN_SPRG_HPACA, local_paca); #endif mtspr(SPRN_SPRG_PACA, local_paca); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 984696136f96..eafb9a79e011 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -227,8 +227,8 @@ static void __init configure_exceptions(void) opal_configure_cores(); /* Enable AIL if supported, and we are in hypervisor mode */ - if (cpu_has_feature(CPU_FTR_HVMODE) && - cpu_has_feature(CPU_FTR_ARCH_207S)) { + if (early_cpu_has_feature(CPU_FTR_HVMODE) && + early_cpu_has_feature(CPU_FTR_ARCH_207S)) { unsigned long lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); } diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 1a96b284b1a6..0821556e16f4 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -549,7 +549,7 @@ static void __init htab_scan_page_sizes(void) * Try to find the available page sizes in the device-tree */ rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL); - if (rc == 0 && mmu_has_feature(MMU_FTR_16M_PAGE)) { + if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) { /* * Nothing in the device-tree, but the CPU supports 16M pages, * so let's fallback on a known size list for 16M capable CPUs. diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 6259f5db525b..16ada1eb7e26 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -427,7 +427,7 @@ void __init mmu_early_init_devtree(void) if (disable_radix) cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; - if (radix_enabled()) + if (early_radix_enabled()) radix__early_init_devtree(); else hash__early_init_devtree(); -- cgit v1.2.3 From 309b315b6ec686ce050758cc4e29f6ad1125a83f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sat, 23 Jul 2016 14:42:38 +0530 Subject: powerpc: Call jump_label_init() in apply_feature_fixups() Call jump_label_init() early so that we can use static keys for CPU and MMU feature checks. 
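For reference, a minimal static key usage sketch using the generic kernel API (nothing here is specific to this patch; probe_example_hw() is hypothetical):

	DEFINE_STATIC_KEY_TRUE(example_key);

	static bool example_enabled(void)
	{
		/* compiles down to a single nop (true) or branch (false) once patched */
		return static_branch_likely(&example_key);
	}

	static void __init example_init(void)
	{
		if (!probe_example_hw())
			static_branch_disable(&example_key);
	}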
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/lib/feature-fixups.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 59c7caee05b2..ae57de78e2aa 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -13,6 +13,7 @@ */ #include <linux/types.h> +#include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> @@ -187,6 +188,13 @@ void __init apply_feature_fixups(void) &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); #endif do_final_fixups(); + + /* + * Initialise jump label. This causes all the cpu/mmu_has_feature() + * checks to take on their correct polarity based on the current set of + * CPU/MMU features. + */ + jump_label_init(); } static int __init check_features(void) -- cgit v1.2.3 From 905259e33d0ca0ee8e0d55783c3eef3402df81b7 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Sat, 23 Jul 2016 14:42:39 +0530 Subject: powerpc: Remove mfvtb() This function is only used by get_vtb(). They are almost the same, except for the actual read of the register. Move the mfspr() into get_vtb() and kill the mfvtb() function. With this, we can eliminate the use of cpu_has_feature() in a very core header file like reg.h. This is a preparation for the use of jump label for cpu_has_feature(). Signed-off-by: Kevin Hao Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/reg.h | 9 --------- arch/powerpc/include/asm/time.h | 2 +- 2 files changed, 1 insertion(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 40f3615bf940..f69f40f1519a 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1256,15 +1256,6 @@ static inline void msr_check_and_clear(unsigned long bits) __msr_check_and_clear(bits); } -static inline unsigned long mfvtb (void) -{ -#ifdef CONFIG_PPC_BOOK3S_64 - if (cpu_has_feature(CPU_FTR_ARCH_207S)) - return mfspr(SPRN_VTB); -#endif - return 0; -} - #ifdef __powerpc64__ #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) #define mftb() ({unsigned long rval; \ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 09211640a0e0..cbbeaf0a6597 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -103,7 +103,7 @@ static inline u64 get_vtb(void) { #ifdef CONFIG_PPC_BOOK3S_64 if (cpu_has_feature(CPU_FTR_ARCH_207S)) - return mfvtb(); + return mfspr(SPRN_VTB); #endif return 0; } -- cgit v1.2.3 From b92a226e528423b8d249dd09bb450d53361fbfcb Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Sat, 23 Jul 2016 14:42:40 +0530 Subject: powerpc: Move cpu_has_feature() to a separate file We plan to use jump label for cpu_has_feature(). In order to implement this we need to include linux/jump_label.h in asm/cputable.h. Unfortunately if we do that it leads to an include loop. The root of the problem seems to be that reg.h needs cputable.h (for CPU_FTRs), and then cputable.h via jump_label.h eventually pulls in hw_irq.h which needs reg.h (for MSR_EE). So move cpu_has_feature() to a separate file on its own. 
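The cycle, drawn out (a sketch of the dependency chain described above, not literal preprocessor output):

	reg.h        -> cputable.h     (for the CPU_FTR_* masks)
	cputable.h   -> jump_label.h   (the include we want to add)
	jump_label.h -> ... -> hw_irq.h
	hw_irq.h     -> reg.h          (for MSR_EE)  <-- loop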
Signed-off-by: Kevin Hao Signed-off-by: Aneesh Kumar K.V [mpe: Rename to cpu_has_feature.h and flesh out change log] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 1 + arch/powerpc/include/asm/cacheflush.h | 1 + arch/powerpc/include/asm/cpu_has_feature.h | 20 ++++++++++++++++++++ arch/powerpc/include/asm/cputable.h | 11 ----------- arch/powerpc/include/asm/cputime.h | 1 + arch/powerpc/include/asm/dbell.h | 1 + arch/powerpc/include/asm/dcr-native.h | 1 + arch/powerpc/include/asm/mman.h | 1 + arch/powerpc/include/asm/time.h | 1 + arch/powerpc/include/asm/xor.h | 1 + arch/powerpc/kernel/align.c | 1 + arch/powerpc/kernel/irq.c | 1 + arch/powerpc/kernel/process.c | 1 + arch/powerpc/kernel/setup-common.c | 1 + arch/powerpc/kernel/setup_32.c | 1 + arch/powerpc/kernel/smp.c | 1 + arch/powerpc/platforms/cell/pervasive.c | 1 + arch/powerpc/xmon/ppc-dis.c | 1 + 18 files changed, 36 insertions(+), 11 deletions(-) create mode 100644 arch/powerpc/include/asm/cpu_has_feature.h (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 5eaf86ac143d..032e9f0bc708 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -24,6 +24,7 @@ #include #include #include +#include <asm/cpu_has_feature.h> /* * SLB diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index 69fb16d7a811..b77f0364df94 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -11,6 +11,7 @@ #include #include +#include <asm/cpu_has_feature.h> /* * No cache flushing is required when address mappings are changed, diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h new file mode 100644 index 000000000000..c935c5854fed --- /dev/null +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -0,0 +1,20 @@ +#ifndef __ASM_POWERPC_CPUFEATURES_H +#define __ASM_POWERPC_CPUFEATURES_H + +#ifndef __ASSEMBLY__ + +#include <asm/cputable.h> + +static inline bool early_cpu_has_feature(unsigned long feature) +{ + return !!((CPU_FTRS_ALWAYS & feature) || + (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); +} + +static inline bool cpu_has_feature(unsigned long feature) +{ + return early_cpu_has_feature(feature); +} + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_POWERPC_CPUFEATURE_H */ diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 3d8dc9a7831d..92961bcfbe3f 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -577,17 +577,6 @@ enum { }; #endif /* __powerpc64__ */ -static inline bool early_cpu_has_feature(unsigned long feature) -{ - return !!((CPU_FTRS_ALWAYS & feature) || - (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); -} - -static inline bool cpu_has_feature(unsigned long feature) -{ - return early_cpu_has_feature(feature); -} - #define HBP_NUM 1 #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 2dfd4fc41f3e..4f60db074725 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -28,6 +28,7 @@ static inline void setup_cputime_one_jiffy(void) { } #include #include #include +#include <asm/cpu_has_feature.h> typedef u64 __nocast cputime_t; typedef u64 __nocast cputime64_t; diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h index 5fa6b20eba10..378167377065 100644 --- a/arch/powerpc/include/asm/dbell.h +++ b/arch/powerpc/include/asm/dbell.h @@ -16,6 +16,7 @@ #include #include +#include <asm/cpu_has_feature.h> #define PPC_DBELL_MSG_BRDCAST (0x04000000) #define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36)) diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h index 4efc11dacb98..4a2beef74277 100644 --- a/arch/powerpc/include/asm/dcr-native.h +++ b/arch/powerpc/include/asm/dcr-native.h @@ -24,6 +24,7 @@ #include #include +#include <asm/cpu_has_feature.h> typedef struct { unsigned int base; diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 2563c435a4b1..ef2d9ac1bc52 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -13,6 +13,7 @@ #include #include +#include <asm/cpu_has_feature.h> /* * This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits() diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index cbbeaf0a6597..b240666b7bc1 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -18,6 +18,7 @@ #include #include +#include <asm/cpu_has_feature.h> /* time.c */ extern unsigned long tb_ticks_per_jiffy; diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h index 0abb97f3be10..a36c2069d8ed 100644 --- a/arch/powerpc/include/asm/xor.h +++ b/arch/powerpc/include/asm/xor.h @@ -23,6 +23,7 @@ #ifdef CONFIG_ALTIVEC #include +#include <asm/cpu_has_feature.h> void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, unsigned long *v2_in); diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index c7097f933114..033f3385fa49 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -26,6 +26,7 @@ #include #include #include +#include <asm/cpu_has_feature.h> struct aligninfo { unsigned char len; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index ac910d9982df..08887cf2b20e 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -75,6 +75,7 @@ #endif #define CREATE_TRACE_POINTS #include +#include <asm/cpu_has_feature.h> DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index a8cca88e972f..9ee2623e0f67 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -58,6 +58,7 @@ #include #include #include +#include <asm/cpu_has_feature.h> #include #include diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 714b4ba7ab86..dba265c586df 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -66,6 +66,7 @@ #include #include #include +#include <asm/cpu_has_feature.h> #include "setup.h" diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 00f57754407e..c3e861df4b20 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -37,6 +37,7 @@ #include #include #include +#include <asm/cpu_has_feature.h> #define DBG(fmt...) 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 5a1f015ea9f3..25a39052bf6b 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -55,6 +55,7 @@ #include #include #include +#include <asm/cpu_has_feature.h> #ifdef DEBUG #include diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c index d17e98bc0c10..e7d075077cb0 100644 --- a/arch/powerpc/platforms/cell/pervasive.c +++ b/arch/powerpc/platforms/cell/pervasive.c @@ -35,6 +35,7 @@ #include #include #include +#include <asm/cpu_has_feature.h> #include "pervasive.h" diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c index 89098f320ad5..ee9891734149 100644 --- a/arch/powerpc/xmon/ppc-dis.c +++ b/arch/powerpc/xmon/ppc-dis.c @@ -20,6 +20,7 @@ along with this file; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #include +#include <asm/cpu_has_feature.h> #include "nonstdio.h" #include "ansidecl.h" #include "ppc.h" -- cgit v1.2.3 From bfbfc8a43c028fe3c77c00c4368890e004ca268e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 27 Jul 2016 22:46:29 +1000 Subject: powerpc: Add kconfig option to use jump labels for cpu/mmu_has_feature() Add a kconfig option to control whether we use jump label for the cpu/mmu_has_feature() checks. Currently this does nothing, but we will enable it in the subsequent patches. Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig.debug | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 171047822b56..36d7e345f599 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -60,6 +60,15 @@ config CODE_PATCHING_SELFTEST depends on DEBUG_KERNEL default n +config JUMP_LABEL_FEATURE_CHECKS + bool "Enable use of jump label for cpu/mmu_has_feature()" + depends on JUMP_LABEL + default y + help + Selecting this options enables use of jump labels for some internal + feature checks. This should generate more optimal code for those + checks. + config FTR_FIXUP_SELFTEST bool "Run self-tests of the feature-fixup code" depends on DEBUG_KERNEL -- cgit v1.2.3 From 4db7327194dba0cb91f274b0f606785a9ee5108d Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Sat, 23 Jul 2016 14:42:41 +0530 Subject: powerpc: Add option to use jump label for cpu_has_feature() We do binary patching of asm code using CPU features, which is a one-time operation, done during early boot. However checks of CPU features in C code are currently done at run time, even though the set of CPU features can never change after boot. We can optimise this by using jump labels to implement cpu_has_feature(), meaning checks in C code are binary patched into a single nop or branch. For a C sequence along the lines of: if (cpu_has_feature(FOO)) return 2; The generated code before is roughly: ld r9,-27640(r2) ld r9,0(r9) lwz r9,32(r9) cmpwi cr7,r9,0 bge cr7, 1f li r3,2 blr 1: ... After (true): nop li r3,2 blr After (false): b 1f li r3,2 blr 1: ... mpe: Rename MAX_CPU_FEATURES as we already have a #define with that name, and define it simply as a constant, rather than doing tricks with sizeof and NULL pointers. Rename the array to cpu_feature_keys. Use the kconfig we added to guard it. Add BUILD_BUG_ON() if the feature is not a compile time constant. Rewrite the change log. 
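The mapping from feature mask to static key index relies on each CPU_FTR_* value being a one-hot bit mask, so __builtin_ctzl() recovers the bit number. A tiny standalone demonstration (plain userspace C, purely illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long feature = 1ul << 13;	/* stand-in for a CPU_FTR_* bit */

		/* trailing zero count == index of the single set bit */
		printf("key index = %d\n", __builtin_ctzl(feature));	/* 13 */
		return 0;
	}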
Signed-off-by: Kevin Hao Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cpu_has_feature.h | 24 ++++++++++++++++++++++++ arch/powerpc/include/asm/cputable.h | 6 ++++++ arch/powerpc/include/asm/mmu.h | 1 + arch/powerpc/kernel/cputable.c | 20 ++++++++++++++++++++ arch/powerpc/lib/feature-fixups.c | 1 + 5 files changed, 52 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index c935c5854fed..b9ce6425fdea 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -11,10 +11,34 @@ static inline bool early_cpu_has_feature(unsigned long feature) (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); } +#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS +#include + +#define NUM_CPU_FTR_KEYS 64 + +extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS]; + +static __always_inline bool cpu_has_feature(unsigned long feature) +{ + int i; + + BUILD_BUG_ON(!__builtin_constant_p(feature)); + + if (CPU_FTRS_ALWAYS & feature) + return true; + + if (!(CPU_FTRS_POSSIBLE & feature)) + return false; + + i = __builtin_ctzl(feature); + return static_branch_likely(&cpu_feature_keys[i]); +} +#else static inline bool cpu_has_feature(unsigned long feature) { return early_cpu_has_feature(feature); } +#endif #endif /* __ASSEMBLY__ */ #endif /* __ASM_POWERPC_CPUFEATURE_H */ diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 92961bcfbe3f..82026b419341 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -123,6 +123,12 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, extern const char *powerpc_base_platform; +#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS +extern void cpu_feature_keys_init(void); +#else +static inline void cpu_feature_keys_init(void) { } +#endif + /* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */ enum { TLB_INVAL_SCOPE_GLOBAL = 0, /* invalidate all TLBs */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 08b4c06604d6..37f55169df3e 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -112,6 +112,7 @@ #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B #ifndef __ASSEMBLY__ +#include #include #ifdef CONFIG_PPC_FSL_BOOK3E diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index d81f826d1029..e3da41f8ef93 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -2224,3 +2225,22 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) return NULL; } + +#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS +struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = { + [0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT +}; +EXPORT_SYMBOL_GPL(cpu_feature_keys); + +void __init cpu_feature_keys_init(void) +{ + int i; + + for (i = 0; i < NUM_CPU_FTR_KEYS; i++) { + unsigned long f = 1ul << i; + + if (!(cur_cpu_spec->cpu_features & f)) + static_branch_disable(&cpu_feature_keys[i]); + } +} +#endif diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index ae57de78e2aa..7a6ccbb74d91 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -195,6 +195,7 @@ void __init apply_feature_fixups(void) * CPU/MMU features. 
*/ jump_label_init(); + cpu_feature_keys_init(); } static int __init check_features(void) -- cgit v1.2.3 From c12e6f24d4137822d5019c1f78ac65bd27a3447d Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Sat, 23 Jul 2016 14:42:42 +0530 Subject: powerpc: Add option to use jump label for mmu_has_feature() As we just did for CPU features. Signed-off-by: Kevin Hao Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cpu_has_feature.h | 1 + arch/powerpc/include/asm/mmu.h | 38 ++++++++++++++++++++++++++++++ arch/powerpc/kernel/cputable.c | 17 +++++++++++++ arch/powerpc/lib/feature-fixups.c | 1 + 4 files changed, 57 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index b9ce6425fdea..039c011b4252 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -3,6 +3,7 @@ #ifndef __ASSEMBLY__ +#include #include static inline bool early_cpu_has_feature(unsigned long feature) diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 37f55169df3e..1bf919aafd50 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -141,6 +141,43 @@ static inline bool early_mmu_has_feature(unsigned long feature) return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature); } +#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS +#include + +#define NUM_MMU_FTR_KEYS 32 + +extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS]; + +extern void mmu_feature_keys_init(void); + +static __always_inline bool mmu_has_feature(unsigned long feature) +{ + int i; + + BUILD_BUG_ON(!__builtin_constant_p(feature)); + + if (!(MMU_FTRS_POSSIBLE & feature)) + return false; + + i = __builtin_ctzl(feature); + return static_branch_likely(&mmu_feature_keys[i]); +} + +static inline void mmu_clear_feature(unsigned long feature) +{ + int i; + + i = __builtin_ctzl(feature); + cur_cpu_spec->mmu_features &= ~feature; + static_branch_disable(&mmu_feature_keys[i]); +} +#else + +static inline void mmu_feature_keys_init(void) +{ + +} + static inline bool mmu_has_feature(unsigned long feature) { return early_mmu_has_feature(feature); @@ -150,6 +187,7 @@ static inline void mmu_clear_feature(unsigned long feature) { cur_cpu_spec->mmu_features &= ~feature; } +#endif /* CONFIG_JUMP_LABEL */ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index e3da41f8ef93..74248ab18e98 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -2243,4 +2243,21 @@ void __init cpu_feature_keys_init(void) static_branch_disable(&cpu_feature_keys[i]); } } + +struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = { + [0 ... 
NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT +}; +EXPORT_SYMBOL_GPL(mmu_feature_keys); + +void __init mmu_feature_keys_init(void) +{ + int i; + + for (i = 0; i < NUM_MMU_FTR_KEYS; i++) { + unsigned long f = 1ul << i; + + if (!(cur_cpu_spec->mmu_features & f)) + static_branch_disable(&mmu_feature_keys[i]); + } +} #endif diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 7a6ccbb74d91..077fa0ce3382 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -196,6 +196,7 @@ void __init apply_feature_fixups(void) */ jump_label_init(); cpu_feature_keys_init(); + mmu_feature_keys_init(); } static int __init check_features(void) -- cgit v1.2.3 From c812c7d8f1470ac9c8aa6d7e29b56e5845ee05fc Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sat, 23 Jul 2016 14:42:43 +0530 Subject: powerpc/mm: Catch usage of cpu/mmu_has_feature() before jump label init This allows us to catch incorrect usage of cpu_has_feature() and mmu_has_feature() prior to jump labels being initialised. mpe: Use printk() and dump_stack() rather than WARN_ON(), because WARN_ON() may not work this early in boot. Rename the Kconfig. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig.debug | 10 ++++++++++ arch/powerpc/include/asm/cpu_has_feature.h | 8 ++++++++ arch/powerpc/include/asm/mmu.h | 8 ++++++++ 3 files changed, 26 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 36d7e345f599..63292f64b25a 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -69,6 +69,16 @@ config JUMP_LABEL_FEATURE_CHECKS feature checks. This should generate more optimal code for those checks. +config JUMP_LABEL_FEATURE_CHECK_DEBUG + bool "Do extra check on feature fixup calls" + depends on DEBUG_KERNEL && JUMP_LABEL_FEATURE_CHECKS + default n + help + This tries to catch incorrect usage of cpu_has_feature() and + mmu_has_feature() in the code. + + If you don't know what this means, say N. + config FTR_FIXUP_SELFTEST bool "Run self-tests of the feature-fixup code" depends on DEBUG_KERNEL diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index 039c011b4252..2ef55f8968a2 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -25,6 +25,14 @@ static __always_inline bool cpu_has_feature(unsigned long feature) BUILD_BUG_ON(!__builtin_constant_p(feature)); +#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG + if (!static_key_initialized) { + printk("Warning! cpu_has_feature() used prior to jump label init!\n"); + dump_stack(); + return early_cpu_has_feature(feature); + } +#endif + if (CPU_FTRS_ALWAYS & feature) return true; diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 1bf919aafd50..e2fb408f8398 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -156,6 +156,14 @@ static __always_inline bool mmu_has_feature(unsigned long feature) BUILD_BUG_ON(!__builtin_constant_p(feature)); +#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG + if (!static_key_initialized) { + printk("Warning! 
mmu_has_feature() used prior to jump label init!\n"); + dump_stack(); + return early_mmu_has_feature(feature); + } +#endif + if (!(MMU_FTRS_POSSIBLE & feature)) return false; -- cgit v1.2.3 From e2985fd9b8de51a24fa290e06c9376a03f9a8924 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 27 Jul 2016 15:35:55 +1000 Subject: powerpc/jump_label: Annotate jump label assembly Add a comment to the generated assembler for jump labels. This makes it easier to identify them in asm listings (generated with $ make foo.s). Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/jump_label.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index 47e155f15433..9878cac7b47c 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h @@ -21,7 +21,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm_volatile_goto("1:\n\t" - "nop\n\t" + "nop # arch_static_branch\n\t" ".pushsection __jump_table, \"aw\"\n\t" JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" ".popsection \n\t" @@ -35,7 +35,7 @@ l_yes: static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) { asm_volatile_goto("1:\n\t" - "b %l[l_yes]\n\t" + "b %l[l_yes] # arch_static_branch_jump\n\t" ".pushsection __jump_table, \"aw\"\n\t" JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" ".popsection \n\t" -- cgit v1.2.3 From 8cb8140c4c93975d7db8fbfd2fd68f7a91f74a5d Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 Jul 2016 15:06:35 +0530 Subject: powerpc/mm/radix: Implement tlb mmu gather flush efficiently Now that we track page size in mmu_gather, we can use address based tlbie format when doing a tlb_flush(). We don't do this if we are invalidating the full address space. 
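The decision the new tlb_flush() makes, in miniature (a hedged restatement of the local-CPU half of the logic added below, not extra behaviour):

	/* e.g. with 64K pages, ranges up to 33 * 64K are flushed by address */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);	/* one PID-wide flush */
	else
		for (addr = start; addr < end; addr += page_size)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);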
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- .../powerpc/include/asm/book3s/64/tlbflush-radix.h | 2 + arch/powerpc/mm/tlb-radix.c | 59 +++++++++++++++++++++- 2 files changed, 60 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 00703e7e4c94..7ee9a195fa9c 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -10,6 +10,8 @@ static inline int mmu_get_ap(int psize) return mmu_psize_defs[psize].ap; } +extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, + unsigned long end, int psize); extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end); diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index e1f22700fb16..6de05c59b375 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -299,8 +299,65 @@ static int radix_get_mmu_psize(int page_size) void radix__tlb_flush(struct mmu_gather *tlb) { + int psize = 0; struct mm_struct *mm = tlb->mm; - radix__flush_tlb_mm(mm); + int page_size = tlb->page_size; + + psize = radix_get_mmu_psize(page_size); + /* + * if page size is not something we understand, do a full mm flush + */ + if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all) + radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize); + else + radix__flush_tlb_mm(mm); +} + +#define TLB_FLUSH_ALL -1UL +/* + * Number of pages above which we will do a bcast tlbie. Just a + * number at this point copied from x86 + */ +static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; + +void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, + unsigned long end, int psize) +{ + unsigned long pid; + unsigned long addr; + int local = mm_is_core_local(mm); + unsigned long ap = mmu_get_ap(psize); + int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); + unsigned long page_size = 1UL << mmu_psize_defs[psize].shift; + + + preempt_disable(); + pid = mm ? mm->context.id : 0; + if (unlikely(pid == MMU_NO_CONTEXT)) + goto err_out; + + if (end == TLB_FLUSH_ALL || + (end - start) > tlb_single_page_flush_ceiling * page_size) { + if (local) + _tlbiel_pid(pid, RIC_FLUSH_TLB); + else + _tlbie_pid(pid, RIC_FLUSH_TLB); + goto err_out; + } + for (addr = start; addr < end; addr += page_size) { + + if (local) + _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB); + else { + if (lock_tlbie) + raw_spin_lock(&native_tlbie_lock); + _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB); + if (lock_tlbie) + raw_spin_unlock(&native_tlbie_lock); + } + } +err_out: + preempt_enable(); } void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa, -- cgit v1.2.3 From 138ee7ee0186dec230dde0c0774dbf5d310bdc39 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 Jul 2016 15:06:37 +0530 Subject: powerpc/mm/hash: Add helper for finding SLBE LLP encoding Replace opencoding of the same at multiple places with the helper. No functional change with this patch. 
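A worked example of the packing the helper performs, runnable as plain C under the assumption (from the SLB VSID layout) that SLB_VSID_L is bit 8 and SLB_VSID_LP is bits 4-5:

	#include <stdio.h>

	#define SLB_VSID_L	0x100UL		/* assumed value */
	#define SLB_VSID_LP	0x030UL		/* assumed value */

	int main(void)
	{
		unsigned long sllp = SLB_VSID_L | 0x010UL;	/* e.g. L=1, LP=01 */
		unsigned long enc = ((sllp & SLB_VSID_L) >> 6) |
				    ((sllp & SLB_VSID_LP) >> 4);

		printf("L|LP encoding = 0x%lx\n", enc);		/* prints 0x5 */
		return 0;
	}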
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 9 +++++++++ arch/powerpc/include/asm/kvm_book3s_64.h | 3 +-- arch/powerpc/mm/hash_native_64.c | 6 ++---- 3 files changed, 12 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 032e9f0bc708..287a656ceb57 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -191,6 +191,15 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) BUG(); } +static inline unsigned long get_sllp_encoding(int psize) +{ + unsigned long sllp; + + sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) | + ((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4); + return sllp; +} + #endif /* __ASSEMBLY__ */ /* diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 1f4497fb5b83..88d17b4ea9c8 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -181,8 +181,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, switch (b_psize) { case MMU_PAGE_4K: - sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) | - ((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4); + sllp = get_sllp_encoding(a_psize); rb |= sllp << 5; /* AP field */ rb |= (va_low & 0x7ff) << 12; /* remaining 11 bits of AVA */ break; diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 88ce7d212320..d2d8efd79cbf 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -72,8 +72,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) /* clear out bits after (52) [0....52.....63] */ va &= ~((1ul << (64 - 52)) - 1); va |= ssize << 8; - sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) | - ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4); + sllp = get_sllp_encoding(apsize); va |= sllp << 5; asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) @@ -122,8 +121,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) /* clear out bits after(52) [0....52.....63] */ va &= ~((1ul << (64 - 52)) - 1); va |= ssize << 8; - sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) | - ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4); + sllp = get_sllp_encoding(apsize); va |= sllp << 5; asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" : : "r"(va) : "memory"); -- cgit v1.2.3 From 13dce033637b8e4d3266667c4df4d72fc37dc0a9 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 Jul 2016 15:06:38 +0530 Subject: powerpc/mm: Use hugetlb flush functions Use flush_hugetlb_page instead of flush_tlb_page when we clear and flush the PTE.
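To see why the one-line change below matters, consider a toy model (every name and size here is illustrative, not the kernel's): a plain flush_tlb_page() invalidates with the base page size, while the TLB entry for a huge mapping was installed with the huge page size, so the flush must be size-aware:

#include <stdio.h>

struct toy_vma { unsigned long huge_shift; };    /* 0 => not hugetlb */

static void tlb_invalidate(unsigned long addr, unsigned long shift)
{
        printf("invalidate %#lx, page size %lu KiB\n", addr, (1UL << shift) / 1024);
}

static void flush_tlb_page(struct toy_vma *vma, unsigned long addr)
{
        (void)vma;
        tlb_invalidate(addr, 12);               /* base page size, 4K here */
}

static void flush_hugetlb_page(struct toy_vma *vma, unsigned long addr)
{
        tlb_invalidate(addr, vma->huge_shift);  /* real mapping size */
}

static void huge_ptep_clear_flush(struct toy_vma *vma, unsigned long addr)
{
        /* ... huge_ptep_get_and_clear() ..., then, as in the patch: */
        flush_hugetlb_page(vma, addr);          /* was: flush_tlb_page() */
}

int main(void)
{
        struct toy_vma vma = { .huge_shift = 21 };      /* 2M huge page */
        huge_ptep_clear_flush(&vma, 0x200000);
        return 0;
}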
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hugetlb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index e2d9f4996e5c..c5517f463ec7 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -147,7 +147,7 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, { pte_t pte; pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); - flush_tlb_page(vma, addr); + flush_hugetlb_page(vma, addr); } static inline int huge_pte_none(pte_t pte) -- cgit v1.2.3 From 9d4dab11585ab2ef25d5d2a00a23b25b552c4080 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 Jul 2016 15:06:39 +0530 Subject: powerpc/mm: Drop multiple definition of mm_is_core_local Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/tlb.h | 13 +++++++++++++ arch/powerpc/mm/tlb-radix.c | 6 ------ arch/powerpc/mm/tlb_nohash.c | 6 ------ 3 files changed, 13 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index 20733fa518ae..f6f68f73e858 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -46,5 +46,18 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, #endif } +#ifdef CONFIG_SMP +static inline int mm_is_core_local(struct mm_struct *mm) +{ + return cpumask_subset(mm_cpumask(mm), + topology_sibling_cpumask(smp_processor_id())); +} +#else +static inline int mm_is_core_local(struct mm_struct *mm) +{ + return 1; +} +#endif + #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_TLB_H */ diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 6de05c59b375..4d0f03ad0996 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -165,12 +165,6 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd EXPORT_SYMBOL(radix__local_flush_tlb_page); #ifdef CONFIG_SMP -static int mm_is_core_local(struct mm_struct *mm) -{ - return cpumask_subset(mm_cpumask(mm), - topology_sibling_cpumask(smp_processor_id())); -} - void radix__flush_tlb_mm(struct mm_struct *mm) { unsigned long pid; diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index f4668488512c..050badc0ebd3 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -215,12 +215,6 @@ EXPORT_SYMBOL(local_flush_tlb_page); static DEFINE_RAW_SPINLOCK(tlbivax_lock); -static int mm_is_core_local(struct mm_struct *mm) -{ - return cpumask_subset(mm_cpumask(mm), - topology_sibling_cpumask(smp_processor_id())); -} - struct tlb_flush_param { unsigned long addr; unsigned int pid; -- cgit v1.2.3 From d8e91e93e97817e8ba069c9c44fb712619cf0a43 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 Jul 2016 15:06:40 +0530 Subject: powerpc/mm/radix: Add tlb flush of THP ptes Instead of flushing the entire mm, implement a flush_pmd_tlb_range Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 2 ++ arch/powerpc/include/asm/book3s/64/tlbflush.h | 9 +++++++++ arch/powerpc/mm/pgtable-book3s64.c | 4 ++-- arch/powerpc/mm/tlb-radix.c | 7 +++++++ 4 files changed, 20 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 
7ee9a195fa9c..30fad5d78e30 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -12,6 +12,8 @@ static inline int mmu_get_ap(int psize) extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, unsigned long end, int psize); +extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end); diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index 96e5769b18b0..0790c4e92a64 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -7,6 +7,15 @@ #include #include +#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +static inline void flush_pmd_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + if (radix_enabled()) + return radix__flush_pmd_tlb_range(vma, start, end); + return hash__flush_tlb_range(vma, start, end); +} + static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 670318766545..7bb8acffe876 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -33,7 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, changed = !pmd_same(*(pmdp), entry); if (changed) { __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry)); - flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); } return changed; } @@ -66,7 +66,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); - flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); /* * This ensures that generic code that rely on IRQ disabling * to prevent a parallel THP split work as expected. 
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 4d0f03ad0996..456c155fe76c 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -391,3 +391,10 @@ void radix__flush_tlb_lpid(unsigned long lpid) asm volatile("eieio; tlbsync; ptesync": : :"memory"); } EXPORT_SYMBOL(radix__flush_tlb_lpid); + +void radix__flush_pmd_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M); +} +EXPORT_SYMBOL(radix__flush_pmd_tlb_range); -- cgit v1.2.3 From f22dfc9158a8a999325e5aeb4b2ceda553430575 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 Jul 2016 15:06:41 +0530 Subject: powerpc/mm/radix: Rename function and drop unused arg Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 10 +++++----- arch/powerpc/mm/hugetlbpage-radix.c | 4 ++-- arch/powerpc/mm/tlb-radix.c | 16 ++++++++-------- 3 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 30fad5d78e30..a0c7f76c13b3 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -20,20 +20,20 @@ extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end extern void radix__local_flush_tlb_mm(struct mm_struct *mm); extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, - unsigned long ap, int nid); extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); +extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, + unsigned long ap); extern void radix__tlb_flush(struct mmu_gather *tlb); #ifdef CONFIG_SMP extern void radix__flush_tlb_mm(struct mm_struct *mm); extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, - unsigned long ap, int nid); extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); +extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, + unsigned long ap); #else #define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm) #define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr) -#define radix___flush_tlb_page(mm,addr,p,i) radix___local_flush_tlb_page(mm,addr,p,i) +#define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p) #define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr) #endif extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa, diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 1e11559e1aac..0dfa1816f0c6 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -20,7 +20,7 @@ void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) WARN(1, "Wrong huge page shift\n"); return ; } - radix___flush_tlb_page(vma->vm_mm, vmaddr, ap, 0); + radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, ap); } void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) @@ -37,7 +37,7 @@ void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, 
unsigned long v WARN(1, "Wrong huge page shift\n"); return ; } - radix___local_flush_tlb_page(vma->vm_mm, vmaddr, ap, 0); + radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, ap); } /* diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 456c155fe76c..fb9974abdde7 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -140,8 +140,8 @@ void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr) } EXPORT_SYMBOL(radix__local_flush_tlb_pwc); -void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, - unsigned long ap, int nid) +void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, + unsigned long ap) { unsigned long pid; @@ -159,8 +159,8 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd if (vma && is_vm_hugetlb_page(vma)) return __local_flush_hugetlb_page(vma, vmaddr); #endif - radix___local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, - mmu_get_ap(mmu_virtual_psize), 0); + radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr, + mmu_get_ap(mmu_virtual_psize)); } EXPORT_SYMBOL(radix__local_flush_tlb_page); @@ -215,8 +215,8 @@ no_context: } EXPORT_SYMBOL(radix__flush_tlb_pwc); -void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, - unsigned long ap, int nid) +void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, + unsigned long ap) { unsigned long pid; @@ -244,8 +244,8 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) if (vma && is_vm_hugetlb_page(vma)) return flush_hugetlb_page(vma, vmaddr); #endif - radix___flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, - mmu_get_ap(mmu_virtual_psize), 0); + radix__flush_tlb_page_psize(vma ? 
vma->vm_mm : NULL, vmaddr, + mmu_get_ap(mmu_virtual_psize)); } EXPORT_SYMBOL(radix__flush_tlb_page); -- cgit v1.2.3 From fbfa26d85418a155feacdb0f73cbf938f1027a8c Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 Jul 2016 15:06:42 +0530 Subject: powerpc/mm/radix/hugetlb: Add helper for finding page size from hstate Use the helper instead of open coding the same at multiple place Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hugetlb-radix.h | 15 +++++++++++ .../powerpc/include/asm/book3s/64/tlbflush-radix.h | 4 +-- arch/powerpc/mm/hugetlbpage-radix.c | 29 ++++++---------------- arch/powerpc/mm/tlb-radix.c | 10 +++++--- 4 files changed, 30 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h b/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h index 60f47649306f..c45189aa7476 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h @@ -11,4 +11,19 @@ extern unsigned long radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); + +static inline int hstate_get_psize(struct hstate *hstate) +{ + unsigned long shift; + + shift = huge_page_shift(hstate); + if (shift == mmu_psize_defs[MMU_PAGE_2M].shift) + return MMU_PAGE_2M; + else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift) + return MMU_PAGE_1G; + else { + WARN(1, "Wrong huge page shift\n"); + return mmu_virtual_psize; + } +} #endif diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index a0c7f76c13b3..10eb0d1e3140 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -22,14 +22,14 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm); extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, - unsigned long ap); + int psize); extern void radix__tlb_flush(struct mmu_gather *tlb); #ifdef CONFIG_SMP extern void radix__flush_tlb_mm(struct mm_struct *mm); extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, - unsigned long ap); + int psize); #else #define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm) #define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr) diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 0dfa1816f0c6..1eca0deaf89b 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -5,39 +5,24 @@ #include #include #include +#include void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { - unsigned long ap, shift; + int psize; struct hstate *hstate = hstate_file(vma->vm_file); - shift = huge_page_shift(hstate); - if (shift == mmu_psize_defs[MMU_PAGE_2M].shift) - ap = mmu_get_ap(MMU_PAGE_2M); - else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift) - ap = mmu_get_ap(MMU_PAGE_1G); - else { - WARN(1, "Wrong huge page shift\n"); - return ; - } - radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, ap); + psize 
= hstate_get_psize(hstate); + radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); } void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { - unsigned long ap, shift; + int psize; struct hstate *hstate = hstate_file(vma->vm_file); - shift = huge_page_shift(hstate); - if (shift == mmu_psize_defs[MMU_PAGE_2M].shift) - ap = mmu_get_ap(MMU_PAGE_2M); - else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift) - ap = mmu_get_ap(MMU_PAGE_1G); - else { - WARN(1, "Wrong huge page shift\n"); - return ; - } - radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, ap); + psize = hstate_get_psize(hstate); + radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); } /* diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index fb9974abdde7..48df05ef5231 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -141,9 +141,10 @@ void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr) EXPORT_SYMBOL(radix__local_flush_tlb_pwc); void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, - unsigned long ap) + int psize) { unsigned long pid; + unsigned long ap = mmu_get_ap(psize); preempt_disable(); pid = mm ? mm->context.id : 0; @@ -160,7 +161,7 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd return __local_flush_hugetlb_page(vma, vmaddr); #endif radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr, - mmu_get_ap(mmu_virtual_psize)); + mmu_virtual_psize); } EXPORT_SYMBOL(radix__local_flush_tlb_page); @@ -216,9 +217,10 @@ no_context: EXPORT_SYMBOL(radix__flush_tlb_pwc); void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, - unsigned long ap) + int psize) { unsigned long pid; + unsigned long ap = mmu_get_ap(psize); preempt_disable(); pid = mm ? mm->context.id : 0; @@ -245,7 +247,7 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) return flush_hugetlb_page(vma, vmaddr); #endif radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr, - mmu_get_ap(mmu_virtual_psize)); + mmu_virtual_psize); } EXPORT_SYMBOL(radix__flush_tlb_page); -- cgit v1.2.3 From 5491ae7b6f48499b8892822cff371746f0b4102f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 Jul 2016 15:06:43 +0530 Subject: powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range Some archs like ppc64 need to do special things when flushing tlb for hugepage. Add a new helper to flush hugetlb tlb range. This helps us to avoid flushing the entire tlb mapping for the pid. 
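The mm/hugetlb.c side of the diff below uses the usual arch-override pattern: generic code falls back to flush_tlb_range() unless the architecture defines __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE and supplies its own helper. A minimal compilable sketch of that pattern, with prints standing in for real flushes:

#include <stdio.h>

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE     /* the "arch" opts in */

static void flush_tlb_range(unsigned long start, unsigned long end)
{
        printf("generic flush %#lx-%#lx\n", start, end);
}

#ifdef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/* Arch override: flush using the hstate's page size, as the patch below
 * does via radix__flush_tlb_range_psize(). */
static void flush_hugetlb_tlb_range(unsigned long start, unsigned long end)
{
        printf("hugetlb-aware flush %#lx-%#lx\n", start, end);
}
#else
#define flush_hugetlb_tlb_range(start, end) flush_tlb_range(start, end)
#endif

int main(void)
{
        flush_tlb_range(0x0, 0x1000);
        flush_hugetlb_tlb_range(0x200000, 0x600000);
        return 0;
}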
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 2 ++ arch/powerpc/include/asm/book3s/64/tlbflush.h | 10 ++++++++++ arch/powerpc/mm/hugetlbpage-radix.c | 10 ++++++++++ mm/hugetlb.c | 10 +++++++++- 4 files changed, 31 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 10eb0d1e3140..65037762b120 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -10,6 +10,8 @@ static inline int mmu_get_ap(int psize) return mmu_psize_defs[psize].ap; } +extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, unsigned long end, int psize); extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma, diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index 0790c4e92a64..146b269de8fa 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -16,6 +16,16 @@ static inline void flush_pmd_tlb_range(struct vm_area_struct *vma, return hash__flush_tlb_range(vma, start, end); } +#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE +static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, + unsigned long start, + unsigned long end) +{ + if (radix_enabled()) + return radix__flush_hugetlb_tlb_range(vma, start, end); + return hash__flush_tlb_range(vma, start, end); +} + static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 1eca0deaf89b..35254a678456 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -25,6 +25,16 @@ void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long v radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); } +void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + int psize; + struct hstate *hstate = hstate_file(vma->vm_file); + + psize = hstate_get_psize(hstate); + radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize); +} + /* * A vairant of hugetlb_get_unmapped_area doing topdown search * FIXME!! should we do as x86 does or non hugetlb area does ? diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f904246a8fd5..af2d88253bfc 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3938,6 +3938,14 @@ same_page: return i ? i : -EFAULT; } +#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE +/* + * ARCHes with special requirements for evicting HUGETLB backing TLB entries can + * implement this. + */ +#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#endif + unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) { @@ -3998,7 +4006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, * once we release i_mmap_rwsem, another task can do the final put_page * and that page table be reused and filled with junk. 
*/ - flush_tlb_range(vma, start, end); + flush_hugetlb_tlb_range(vma, start, end); mmu_notifier_invalidate_range(mm, start, end); i_mmap_unlock_write(vma->vm_file->f_mapping); mmu_notifier_invalidate_range_end(mm, start, end); -- cgit v1.2.3 From 703b41ad1a8759949adc1b641a82c2b227d18223 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 Jul 2016 15:06:44 +0530 Subject: powerpc/mm: remove flush_tlb_page_nohash This should be the same as flush_tlb_page() except for hash32. For hash32 the existing code appears to be wrong, because we don't seem to be flushing the TLB for the Hash != 0 case at all. Fix this by switching to flush_tlb_page(), which does the right thing by flushing the TLB for both the hash and nohash cases on hash32. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 5 ----- arch/powerpc/include/asm/book3s/64/tlbflush.h | 8 -------- arch/powerpc/include/asm/tlbflush.h | 1 - arch/powerpc/mm/pgtable.c | 2 +- arch/powerpc/mm/tlb_hash32.c | 11 ----------- 5 files changed, 1 insertion(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h index f12ddf5e8de5..2f6373144e2c 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h @@ -75,11 +75,6 @@ static inline void hash__flush_tlb_page(struct vm_area_struct *vma, { } -static inline void hash__flush_tlb_page_nohash(struct vm_area_struct *vma, - unsigned long vmaddr) -{ -} - static inline void hash__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index 146b269de8fa..72b925f97bab 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -57,14 +57,6 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma, return hash__local_flush_tlb_page(vma, vmaddr); } -static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, - unsigned long vmaddr) -{ - if (radix_enabled()) - return radix__flush_tlb_page(vma, vmaddr); - return hash__flush_tlb_page_nohash(vma, vmaddr); -} - static inline void tlb_flush(struct mmu_gather *tlb) { if (radix_enabled()) diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index 1b38eea28e5a..13dbcd41885e 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h @@ -54,7 +54,6 @@ extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, #define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr) #define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i) #endif -#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr) #elif defined(CONFIG_PPC_STD_MMU_32) diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 88a307504b5a..0b6fb244d0a1 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -225,7 +225,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, if (!is_vm_hugetlb_page(vma)) assert_pte_locked(vma->vm_mm, address); __ptep_set_access_flags(ptep, entry); - flush_tlb_page_nohash(vma, address); + flush_tlb_page(vma, address); } return changed; } diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c index 558e30cce33e..702d7689d714 100644 --- a/arch/powerpc/mm/tlb_hash32.c +++ 
b/arch/powerpc/mm/tlb_hash32.c @@ -48,17 +48,6 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) } EXPORT_SYMBOL(flush_hash_entry); -/* - * Called by ptep_set_access_flags, must flush on CPUs for which the - * DSI handler can't just "fixup" the TLB on a write fault - */ -void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr) -{ - if (Hash != 0) - return; - _tlbie(addr); -} - /* * Called at the end of a mmu_gather operation to make sure the * TLB flush is completely done. -- cgit v1.2.3 From 8d460f6156cd55d981d109f01b82cbea8cf80e57 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:31 +0800 Subject: powerpc/process: Add the function flush_tmregs_to_thread This patch creates a function flush_tmregs_to_thread which will then be used by subsequent patches in this series. The function checks for self-tracing ptrace attempts while in the TM context and logs an appropriate warning message. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/switch_to.h | 8 ++++++++ arch/powerpc/kernel/process.c | 20 ++++++++++++++++++++ 2 files changed, 28 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 17c8380673a6..0a74ebe934e1 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -75,6 +75,14 @@ static inline void disable_kernel_spe(void) static inline void __giveup_spe(struct task_struct *t) { } #endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +extern void flush_tmregs_to_thread(struct task_struct *); +#else +static inline void flush_tmregs_to_thread(struct task_struct *t) +{ +} +#endif + static inline void clear_task_ebb(struct task_struct *t) { #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 9ee2623e0f67..58ccf86415b4 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1074,6 +1074,26 @@ static inline void restore_sprs(struct thread_struct *old_thread, #endif } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +void flush_tmregs_to_thread(struct task_struct *tsk) +{ + /* + * Process self tracing is not yet supported through + * ptrace interface. Ptrace generic code should have + * prevented this from happening in the first place. + * Warn once here with the message, if somehow it + * is attempted. + */ + WARN_ONCE(tsk == current, + "Not expecting ptrace on self: TM regs may be incorrect\n"); + + /* + * If task is not current, it should have been flushed + * already to its thread_struct during __switch_to(). + */ +} +#endif + struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *new) { -- cgit v1.2.3 From 1ec8549d4464a2004523acee8ce096a3ac916440 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:32 +0800 Subject: powerpc/ptrace: Enable in transaction NT_PRFPREG ptrace requests This patch enables in-transaction NT_PRFPREG ptrace requests. The function fpr_get, which gets the running value of all FPR registers, and the function fpr_set, which sets the running value of all FPR registers, work on the running set of FPR registers whose location will be different if a transaction is active. This patch makes these functions adapt to situations when the transaction is active.
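The heart of the change is choosing the right source for the running FPR values. A minimal user-space model of that selection (the structure and field names are simplified stand-ins for the real thread_struct fields):

#include <stdbool.h>
#include <stdio.h>

struct toy_thread {
        bool tm_active;         /* models MSR_TM_ACTIVE(regs->msr) */
        double fp_state[33];    /* checkpointed values while transactional */
        double transact_fp[33]; /* running values while transactional */
};

/* Mirrors the choice fpr_get()/fpr_set() make below: the running FPRs
 * live in transact_fp only while a transaction is active. */
static double *running_fprs(struct toy_thread *t)
{
        return t->tm_active ? t->transact_fp : t->fp_state;
}

int main(void)
{
        struct toy_thread t = { .tm_active = true };

        t.transact_fp[0] = 1.5; /* what the task currently computes with */
        t.fp_state[0] = 0.25;   /* fallback if the transaction aborts */
        printf("ptrace sees fpr0 = %g\n", running_fprs(&t)[0]);
        return 0;
}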
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ptrace.c | 93 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 89 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 134bee9ac664..e8d126c3e7cd 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -358,6 +358,29 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, return ret; } +/* + * When the transaction is active, 'transact_fp' holds the current running + * value of all FPR registers and 'fp_state' holds the last checkpointed + * value of all FPR registers for the current transaction. When transaction + * is not active 'fp_state' holds the current running state of all the FPR + * registers. So this function which returns the current running values of + * all the FPR registers, needs to know whether any transaction is active + * or not. + * + * Userspace interface buffer layout: + * + * struct data { + * u64 fpr[32]; + * u64 fpscr; + * }; + * + * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM + * which determines the final code in this function. All the combinations of + * these two config options are possible except the one below as transactional + * memory config pulls in CONFIG_VSX automatically. + * + * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM) + */ static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) @@ -368,14 +391,31 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, #endif flush_fp_to_thread(target); -#ifdef CONFIG_VSX +#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM) + /* copy to local buffer then write that out */ + if (MSR_TM_ACTIVE(target->thread.regs->msr)) { + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + for (i = 0; i < 32 ; i++) + buf[i] = target->thread.TS_TRANS_FPR(i); + buf[32] = target->thread.transact_fp.fpscr; + } else { + for (i = 0; i < 32 ; i++) + buf[i] = target->thread.TS_FPR(i); + buf[32] = target->thread.fp_state.fpscr; + } + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1); +#endif + +#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM) /* copy to local buffer then write that out */ for (i = 0; i < 32 ; i++) buf[i] = target->thread.TS_FPR(i); buf[32] = target->thread.fp_state.fpscr; return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1); +#endif -#else +#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM) BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != offsetof(struct thread_fp_state, fpr[32])); @@ -384,6 +424,29 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, #endif } +/* + * When the transaction is active, 'transact_fp' holds the current running + * value of all FPR registers and 'fp_state' holds the last checkpointed + * value of all FPR registers for the current transaction. When transaction + * is not active 'fp_state' holds the current running state of all the FPR + * registers. So this function which setss the current running values of + * all the FPR registers, needs to know whether any transaction is active + * or not. 
+ * + * Userspace interface buffer layout: + * + * struct data { + * u64 fpr[32]; + * u64 fpscr; + * }; + * + * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM + * which determines the final code in this function. All the combinations of + * these two config options are possible except the one below as transactional + * memory config pulls in CONFIG_VSX automatically. + * + * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM) + */ static int fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) @@ -394,7 +457,27 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, u64 buf[33]; int i; #endif flush_fp_to_thread(target); -#ifdef CONFIG_VSX +#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM) + /* copy to local buffer then write that out */ + i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); + if (i) + return i; + + if (MSR_TM_ACTIVE(target->thread.regs->msr)) { + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + for (i = 0; i < 32 ; i++) + target->thread.TS_TRANS_FPR(i) = buf[i]; + target->thread.transact_fp.fpscr = buf[32]; + } else { + for (i = 0; i < 32 ; i++) + target->thread.TS_FPR(i) = buf[i]; + target->thread.fp_state.fpscr = buf[32]; + } + return 0; +#endif + +#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM) /* copy to local buffer then write that out */ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); if (i) @@ -403,7 +486,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, target->thread.TS_FPR(i) = buf[i]; target->thread.fp_state.fpscr = buf[32]; return 0; -#else +#endif + +#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM) BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != offsetof(struct thread_fp_state, fpr[32])); -- cgit v1.2.3 From d844e279152c27e5b1a56dc40de08ddfd99176f3 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:33 +0800 Subject: powerpc/ptrace: Enable in transaction NT_PPC_VMX ptrace requests This patch enables in-transaction NT_PPC_VMX ptrace requests. The function vr_get, which gets the running value of all VMX registers, and the function vr_set, which sets the running value of all VMX registers, work on the running set of VMX registers whose location will be different if a transaction is active. This patch makes these functions adapt to situations when the transaction is active. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ptrace.c | 90 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 87 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index e8d126c3e7cd..72da7dc924f1 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -518,10 +518,28 @@ static int vr_active(struct task_struct *target, return target->thread.used_vr ? regset->n : 0; } +/* + * When the transaction is active, 'transact_vr' holds the current running + * value of all the VMX registers and 'vr_state' holds the last checkpointed + * value of all the VMX registers for the current transaction to fall back + * on in case it aborts. When transaction is not active 'vr_state' holds + * the current running state of all the VMX registers. 
So this function which + * gets the current running values of all the VMX registers, needs to know + * whether any transaction is active or not. + * + * Userspace interface buffer layout: + * + * struct data { + * vector128 vr[32]; + * vector128 vscr; + * vector128 vrsave; + * }; + */ static int vr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { + struct thread_vr_state *addr; int ret; flush_altivec_to_thread(target); @@ -529,8 +547,19 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset, BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) != offsetof(struct thread_vr_state, vr[32])); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(target->thread.regs->msr)) { + flush_fp_to_thread(target); + flush_tmregs_to_thread(target); + addr = &target->thread.transact_vr; + } else { + addr = &target->thread.vr_state; + } +#else + addr = &target->thread.vr_state; +#endif ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.vr_state, 0, + addr, 0, 33 * sizeof(vector128)); if (!ret) { /* @@ -541,7 +570,16 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset, u32 word; } vrsave; memset(&vrsave, 0, sizeof(vrsave)); + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(target->thread.regs->msr)) + vrsave.word = target->thread.transact_vrsave; + else + vrsave.word = target->thread.vrsave; +#else vrsave.word = target->thread.vrsave; +#endif + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave, 33 * sizeof(vector128), -1); } @@ -549,10 +587,28 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset, return ret; } +/* + * When the transaction is active, 'transact_vr' holds the current running + * value of all the VMX registers and 'vr_state' holds the last checkpointed + * value of all the VMX registers for the current transaction to fall back + * on in case it aborts. When transaction is not active 'vr_state' holds + * the current running state of all the VMX registers. So this function which + * sets the current running values of all the VMX registers, needs to know + * whether any transaction is active or not. 
+ * + * Userspace interface buffer layout: + * + * struct data { + * vector128 vr[32]; + * vector128 vscr; + * vector128 vrsave; + * }; + */ static int vr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { + struct thread_vr_state *addr; int ret; flush_altivec_to_thread(target); @@ -560,8 +616,19 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) != offsetof(struct thread_vr_state, vr[32])); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(target->thread.regs->msr)) { + flush_fp_to_thread(target); + flush_tmregs_to_thread(target); + addr = &target->thread.transact_vr; + } else { + addr = &target->thread.vr_state; + } +#else + addr = &target->thread.vr_state; +#endif ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.vr_state, 0, + addr, 0, 33 * sizeof(vector128)); if (!ret && count > 0) { /* @@ -572,11 +639,28 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, u32 word; } vrsave; memset(&vrsave, 0, sizeof(vrsave)); + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(target->thread.regs->msr)) + vrsave.word = target->thread.transact_vrsave; + else + vrsave.word = target->thread.vrsave; +#else vrsave.word = target->thread.vrsave; +#endif ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave, 33 * sizeof(vector128), -1); - if (!ret) + if (!ret) { + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(target->thread.regs->msr)) + target->thread.transact_vrsave = vrsave.word; + else + target->thread.vrsave = vrsave.word; +#else target->thread.vrsave = vrsave.word; +#endif + } } return ret; -- cgit v1.2.3 From 94b7d3610e5b03399f1791d7905103f5298784bc Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:34 +0800 Subject: powerpc/ptrace: Enable in transaction NT_PPC_VSX ptrace requests This patch enables in-transaction NT_PPC_VSX ptrace requests. The function vsr_get, which gets the running value of all VSX registers, and the function vsr_set, which sets the running value of all VSX registers, work on the running set of VSX registers whose location will be different if a transaction is active. This patch makes these functions adapt to situations when the transaction is active. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ptrace.c | 64 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 72da7dc924f1..9c0e405dc4cd 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -681,6 +681,21 @@ static int vsr_active(struct task_struct *target, return target->thread.used_vsr ? regset->n : 0; } +/* + * When the transaction is active, 'transact_fp' holds the current running + * value of all FPR registers and 'fp_state' holds the last checkpointed + * value of all FPR registers for the current transaction. When transaction + * is not active 'fp_state' holds the current running state of all the FPR + * registers. So this function which returns the current running values of + * all the FPR registers, needs to know whether any transaction is active + * or not. 
+ * + * Userspace interface buffer layout: + * + * struct data { + * u64 vsx[32]; + * }; + */ static int vsr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) @@ -688,16 +703,47 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset, u64 buf[32]; int ret, i; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); +#endif flush_vsx_to_thread(target); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(target->thread.regs->msr)) { + for (i = 0; i < 32 ; i++) + buf[i] = target->thread. + transact_fp.fpr[i][TS_VSRLOWOFFSET]; + } else { + for (i = 0; i < 32 ; i++) + buf[i] = target->thread. + fp_state.fpr[i][TS_VSRLOWOFFSET]; + } +#else for (i = 0; i < 32 ; i++) buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; +#endif ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); return ret; } +/* + * When the transaction is active, 'transact_fp' holds the current running + * value of all FPR registers and 'fp_state' holds the last checkpointed + * value of all FPR registers for the current transaction. When transaction + * is not active 'fp_state' holds the current running state of all the FPR + * registers. So this function which sets the current running values of all + * the FPR registers, needs to know whether any transaction is active or not. + * + * Userspace interface buffer layout: + * + * struct data { + * u64 vsx[32]; + * }; + */ static int vsr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) @@ -705,12 +751,30 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset, u64 buf[32]; int ret,i; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); +#endif flush_vsx_to_thread(target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(target->thread.regs->msr)) { + for (i = 0; i < 32 ; i++) + target->thread.transact_fp. + fpr[i][TS_VSRLOWOFFSET] = buf[i]; + } else { + for (i = 0; i < 32 ; i++) + target->thread.fp_state. + fpr[i][TS_VSRLOWOFFSET] = buf[i]; + } +#else for (i = 0; i < 32 ; i++) target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; +#endif return ret; -- cgit v1.2.3 From 04fcadce0e5a8d2b5f73f0992a0ead324f338db1 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:35 +0800 Subject: powerpc/ptrace: Adapt gpr32_get, gpr32_set functions for transaction This patch splits gpr32_get, gpr32_set functions to accommodate in transaction ptrace requests implemented in patches later in the series. 
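The split is a straightforward parameterisation: the existing entry points become thin wrappers around a _common helper that takes a flag selecting the checkpointed register set. A compressed sketch of the shape (the arrays are stand-ins for the real register state):

#include <stdbool.h>
#include <stdio.h>

static long gprs[32];           /* models thread.regs->gpr[]     */
static long ckpt_gprs[32];      /* models thread.ckpt_regs.gpr[] */

/* Mirrors gpr32_get_common(): select the register file by flag. */
static long gpr32_get_common(int n, bool tm_active)
{
        long *regs = tm_active ? ckpt_gprs : gprs;
        return regs[n];
}

/* Thin wrappers, as in the patch: the existing entry point passes
 * false; the TM entry point added later in the series passes true. */
static long gpr32_get(int n)     { return gpr32_get_common(n, false); }
static long tm_cgpr32_get(int n) { return gpr32_get_common(n, true); }

int main(void)
{
        gprs[3] = 42;
        ckpt_gprs[3] = 7;
        printf("live r3=%ld checkpointed r3=%ld\n", gpr32_get(3), tm_cgpr32_get(3));
        return 0;
}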
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ptrace.c | 64 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 51 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 9c0e405dc4cd..83cd0a18fec9 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -907,24 +907,35 @@ static const struct user_regset_view user_ppc_native_view = { #ifdef CONFIG_PPC64 #include -static int gpr32_get(struct task_struct *target, +static int gpr32_get_common(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) + void *kbuf, void __user *ubuf, bool tm_active) { const unsigned long *regs = &target->thread.regs->gpr[0]; + const unsigned long *ckpt_regs; compat_ulong_t *k = kbuf; compat_ulong_t __user *u = ubuf; compat_ulong_t reg; int i; - if (target->thread.regs == NULL) - return -EIO; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + ckpt_regs = &target->thread.ckpt_regs.gpr[0]; +#endif + if (tm_active) { + regs = ckpt_regs; + } else { + if (target->thread.regs == NULL) + return -EIO; - if (!FULL_REGS(target->thread.regs)) { - /* We have a partial register set. Fill 14-31 with bogus values */ - for (i = 14; i < 32; i++) - target->thread.regs->gpr[i] = NV_REG_POISON; + if (!FULL_REGS(target->thread.regs)) { + /* + * We have a partial register set. + * Fill 14-31 with bogus values. + */ + for (i = 14; i < 32; i++) + target->thread.regs->gpr[i] = NV_REG_POISON; + } } pos /= sizeof(reg); @@ -964,20 +975,31 @@ static int gpr32_get(struct task_struct *target, PT_REGS_COUNT * sizeof(reg), -1); } -static int gpr32_set(struct task_struct *target, +static int gpr32_set_common(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) + const void *kbuf, const void __user *ubuf, bool tm_active) { unsigned long *regs = &target->thread.regs->gpr[0]; + unsigned long *ckpt_regs; const compat_ulong_t *k = kbuf; const compat_ulong_t __user *u = ubuf; compat_ulong_t reg; - if (target->thread.regs == NULL) - return -EIO; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + ckpt_regs = &target->thread.ckpt_regs.gpr[0]; +#endif - CHECK_FULL_REGS(target->thread.regs); + if (tm_active) { + regs = ckpt_regs; + } else { + regs = &target->thread.regs->gpr[0]; + + if (target->thread.regs == NULL) + return -EIO; + + CHECK_FULL_REGS(target->thread.regs); + } pos /= sizeof(reg); count /= sizeof(reg); @@ -1037,6 +1059,22 @@ static int gpr32_set(struct task_struct *target, (PT_TRAP + 1) * sizeof(reg), -1); } +static int gpr32_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0); +} + +static int gpr32_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0); +} + /* * These are the regset flavors matching the CONFIG_PPC32 native set. 
*/ -- cgit v1.2.3 From 25847fb195ae09ba28896184d361acc73aaa2fb1 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:36 +0800 Subject: powerpc/ptrace: Enable support for NT_PPC_CGPR This patch enables support for TM checkpointed GPR register set ELF core note NT_PPC_CGPR based ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls. This is achieved through adding a register set REGSET_CGPR in powerpc corresponding to the ELF core note section added. It implements the get, set and active functions for this new register set added. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ptrace.c | 222 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 222 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 83cd0a18fec9..ce853c72fc74 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -181,6 +181,26 @@ static int set_user_msr(struct task_struct *task, unsigned long msr) return 0; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +static unsigned long get_user_ckpt_msr(struct task_struct *task) +{ + return task->thread.ckpt_regs.msr | task->thread.fpexc_mode; +} + +static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr) +{ + task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE; + task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE; + return 0; +} + +static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap) +{ + task->thread.ckpt_regs.trap = trap & 0xfff0; + return 0; +} +#endif + #ifdef CONFIG_PPC64 static int get_user_dscr(struct task_struct *task, unsigned long *data) { @@ -847,6 +867,172 @@ static int evr_set(struct task_struct *target, const struct user_regset *regset, } #endif /* CONFIG_SPE */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/** + * tm_cgpr_active - get active number of registers in CGPR + * @target: The target task. + * @regset: The user regset structure. + * + * This function checks for the active number of available + * regisers in transaction checkpointed GPR category. + */ +static int tm_cgpr_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return 0; + + return regset->n; +} + +/** + * tm_cgpr_get - get CGPR registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy from. + * @ubuf: User buffer to copy into. + * + * This function gets transaction checkpointed GPR registers. + * + * When the transaction is active, 'ckpt_regs' holds all the checkpointed + * GPR register values for the current transaction to fall back on if it + * aborts in between. This function gets those checkpointed GPR registers. + * The userspace interface buffer layout is as follows. 
+ * + * struct data { + * struct pt_regs ckpt_regs; + * }; + */ +static int tm_cgpr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.ckpt_regs, + 0, offsetof(struct pt_regs, msr)); + if (!ret) { + unsigned long msr = get_user_ckpt_msr(target); + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr, + offsetof(struct pt_regs, msr), + offsetof(struct pt_regs, msr) + + sizeof(msr)); + } + + BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) != + offsetof(struct pt_regs, msr) + sizeof(long)); + + if (!ret) + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.ckpt_regs.orig_gpr3, + offsetof(struct pt_regs, orig_gpr3), + sizeof(struct pt_regs)); + if (!ret) + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + sizeof(struct pt_regs), -1); + + return ret; +} + +/* + * tm_cgpr_set - set the CGPR registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy into. + * @ubuf: User buffer to copy from. + * + * This function sets in transaction checkpointed GPR registers. + * + * When the transaction is active, 'ckpt_regs' holds the checkpointed + * GPR register values for the current transaction to fall back on if it + * aborts in between. This function sets those checkpointed GPR registers. + * The userspace interface buffer layout is as follows. + * + * struct data { + * struct pt_regs ckpt_regs; + * }; + */ +static int tm_cgpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + unsigned long reg; + int ret; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.ckpt_regs, + 0, PT_MSR * sizeof(reg)); + + if (!ret && count > 0) { + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®, + PT_MSR * sizeof(reg), + (PT_MSR + 1) * sizeof(reg)); + if (!ret) + ret = set_user_ckpt_msr(target, reg); + } + + BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) != + offsetof(struct pt_regs, msr) + sizeof(long)); + + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.ckpt_regs.orig_gpr3, + PT_ORIG_R3 * sizeof(reg), + (PT_MAX_PUT_REG + 1) * sizeof(reg)); + + if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret) + ret = user_regset_copyin_ignore( + &pos, &count, &kbuf, &ubuf, + (PT_MAX_PUT_REG + 1) * sizeof(reg), + PT_TRAP * sizeof(reg)); + + if (!ret && count > 0) { + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®, + PT_TRAP * sizeof(reg), + (PT_TRAP + 1) * sizeof(reg)); + if (!ret) + ret = set_user_ckpt_trap(target, reg); + } + + if (!ret) + ret = user_regset_copyin_ignore( + &pos, &count, &kbuf, &ubuf, + (PT_TRAP + 1) * sizeof(reg), -1); + + return ret; +} +#endif /* * These are our native regset flavors. 
@@ -863,6 +1049,9 @@ enum powerpc_regset { #ifdef CONFIG_SPE REGSET_SPE, #endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + REGSET_TM_CGPR, /* TM checkpointed GPR registers */ +#endif }; static const struct user_regset native_regsets[] = { @@ -897,6 +1086,13 @@ static const struct user_regset native_regsets[] = { .active = evr_active, .get = evr_get, .set = evr_set }, #endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + [REGSET_TM_CGPR] = { + .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG, + .size = sizeof(long), .align = sizeof(long), + .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set + }, +#endif }; static const struct user_regset_view user_ppc_native_view = { @@ -1059,6 +1255,24 @@ static int gpr32_set_common(struct task_struct *target, (PT_TRAP + 1) * sizeof(reg), -1); } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +static int tm_cgpr32_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 1); +} + +static int tm_cgpr32_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 1); +} +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + static int gpr32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, @@ -1103,6 +1317,14 @@ static const struct user_regset compat_regsets[] = { .active = evr_active, .get = evr_get, .set = evr_set }, #endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + [REGSET_TM_CGPR] = { + .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG, + .size = sizeof(long), .align = sizeof(long), + .active = tm_cgpr_active, + .get = tm_cgpr32_get, .set = tm_cgpr32_set + }, +#endif }; static const struct user_regset_view user_ppc_compat_view = { -- cgit v1.2.3 From 19cbcbf75a0c1b6b07b9b07de36f30c73e58d230 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:37 +0800 Subject: powerpc/ptrace: Enable support for NT_PPC_CFPR This patch enables support for TM checkpointed FPR register set ELF core note NT_PPC_CFPR based ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls. This is achieved through adding a register set REGSET_CFPR in powerpc corresponding to the ELF core note section added. It implements the get, set and active functions for this new register set added. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ptrace.c | 126 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index ce853c72fc74..b216c397b84f 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1032,6 +1032,121 @@ static int tm_cgpr_set(struct task_struct *target, return ret; } + +/** + * tm_cfpr_active - get active number of registers in CFPR + * @target: The target task. + * @regset: The user regset structure. + * + * This function checks for the active number of available + * regisers in transaction checkpointed FPR category. 
+ */ +static int tm_cfpr_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return 0; + + return regset->n; +} + +/** + * tm_cfpr_get - get CFPR registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy from. + * @ubuf: User buffer to copy into. + * + * This function gets in transaction checkpointed FPR registers. + * + * When the transaction is active 'fp_state' holds the checkpointed + * values for the current transaction to fall back on if it aborts + * in between. This function gets those checkpointed FPR registers. + * The userspace interface buffer layout is as follows. + * + * struct data { + * u64 fpr[32]; + * u64 fpscr; + *}; + */ +static int tm_cfpr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + u64 buf[33]; + int i; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + + /* copy to local buffer then write that out */ + for (i = 0; i < 32 ; i++) + buf[i] = target->thread.TS_FPR(i); + buf[32] = target->thread.fp_state.fpscr; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1); +} + +/** + * tm_cfpr_set - set CFPR registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy into. + * @ubuf: User buffer to copy from. + * + * This function sets in transaction checkpointed FPR registers. + * + * When the transaction is active 'fp_state' holds the checkpointed + * FPR register values for the current transaction to fall back on + * if it aborts in between. This function sets these checkpointed + * FPR registers. The userspace interface buffer layout is as follows. 
+ * + * struct data { + * u64 fpr[32]; + * u64 fpscr; + *}; + */ +static int tm_cfpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + u64 buf[33]; + int i; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + + /* copy to local buffer then write that out */ + i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); + if (i) + return i; + for (i = 0; i < 32 ; i++) + target->thread.TS_FPR(i) = buf[i]; + target->thread.fp_state.fpscr = buf[32]; + return 0; +} #endif /* @@ -1051,6 +1166,7 @@ enum powerpc_regset { #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM REGSET_TM_CGPR, /* TM checkpointed GPR registers */ + REGSET_TM_CFPR, /* TM checkpointed FPR registers */ #endif }; @@ -1092,6 +1208,11 @@ static const struct user_regset native_regsets[] = { .size = sizeof(long), .align = sizeof(long), .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set }, + [REGSET_TM_CFPR] = { + .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG, + .size = sizeof(double), .align = sizeof(double), + .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set + }, #endif }; @@ -1324,6 +1445,11 @@ static const struct user_regset compat_regsets[] = { .active = tm_cgpr_active, .get = tm_cgpr32_get, .set = tm_cgpr32_set }, + [REGSET_TM_CFPR] = { + .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG, + .size = sizeof(double), .align = sizeof(double), + .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set + }, #endif }; -- cgit v1.2.3 From 8c13f5999997d36fc5fb296809efedc13c801704 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:38 +0800 Subject: powerpc/ptrace: Enable support for NT_PPC_TM_CVMX This patch enables support for TM checkpointed VMX register set ELF core note NT_PPC_TM_CVMX based ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls. This is achieved through adding a register set REGSET_TM_CVMX in powerpc corresponding to the ELF core note section added. It implements the get, set and active functions for this new register set added. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/include/uapi/asm/elf.h | 1 + arch/powerpc/kernel/ptrace.c | 158 ++++++++++++++++++++++++++++++++++++ 2 files changed, 159 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h index c2d21d11c2d2..ecb4e84cb22c 100644 --- a/arch/powerpc/include/uapi/asm/elf.h +++ b/arch/powerpc/include/uapi/asm/elf.h @@ -91,6 +91,7 @@ #define ELF_NGREG 48 /* includes nip, msr, lr, etc.
*/ #define ELF_NFPREG 33 /* includes fpscr */ +#define ELF_NVMX 34 /* includes all vector registers */ typedef unsigned long elf_greg_t64; typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG]; diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index b216c397b84f..6e42137c0175 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -64,6 +64,8 @@ struct pt_regs_offset { {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])} #define REG_OFFSET_END {.name = NULL, .offset = 0} +#define TVSO(f) (offsetof(struct thread_vr_state, f)) + static const struct pt_regs_offset regoffset_table[] = { GPR_OFFSET_NAME(0), GPR_OFFSET_NAME(1), @@ -1147,6 +1149,151 @@ static int tm_cfpr_set(struct task_struct *target, target->thread.fp_state.fpscr = buf[32]; return 0; } + +/** + * tm_cvmx_active - get active number of registers in CVMX + * @target: The target task. + * @regset: The user regset structure. + * + * This function checks for the active number of available + * registers in the checkpointed VMX category. + */ +static int tm_cvmx_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return 0; + + return regset->n; +} + +/** + * tm_cvmx_get - get CVMX registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy from. + * @ubuf: User buffer to copy into. + * + * This function gets in transaction checkpointed VMX registers. + * + * When the transaction is active 'vr_state' and 'vrsave' hold + * the checkpointed values for the current transaction to fall + * back on if it aborts in between. The userspace interface buffer + * layout is as follows. + * + * struct data { + * vector128 vr[32]; + * vector128 vscr; + * vector128 vrsave; + *}; + */ +static int tm_cvmx_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32])); + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + /* Flush the state */ + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.vr_state, 0, + 33 * sizeof(vector128)); + if (!ret) { + /* + * Copy out only the low-order word of vrsave. + */ + union { + elf_vrreg_t reg; + u32 word; + } vrsave; + memset(&vrsave, 0, sizeof(vrsave)); + vrsave.word = target->thread.vrsave; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave, + 33 * sizeof(vector128), -1); + } + + return ret; +} + +/** + * tm_cvmx_set - set CVMX registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy into. + * @ubuf: User buffer to copy from. + * + * This function sets in transaction checkpointed VMX registers. + * + * When the transaction is active 'vr_state' and 'vrsave' hold + * the checkpointed values for the current transaction to fall + * back on if it aborts in between. The userspace interface buffer + * layout is as follows.
+ * + * struct data { + * vector128 vr[32]; + * vector128 vscr; + * vector128 vrsave; + *}; + */ +static int tm_cvmx_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32])); + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.vr_state, 0, + 33 * sizeof(vector128)); + if (!ret && count > 0) { + /* + * We use only the low-order word of vrsave. + */ + union { + elf_vrreg_t reg; + u32 word; + } vrsave; + memset(&vrsave, 0, sizeof(vrsave)); + vrsave.word = target->thread.vrsave; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave, + 33 * sizeof(vector128), -1); + if (!ret) + target->thread.vrsave = vrsave.word; + } + + return ret; +} #endif /* @@ -1167,6 +1314,7 @@ enum powerpc_regset { #ifdef CONFIG_PPC_TRANSACTIONAL_MEM REGSET_TM_CGPR, /* TM checkpointed GPR registers */ REGSET_TM_CFPR, /* TM checkpointed FPR registers */ + REGSET_TM_CVMX, /* TM checkpointed VMX registers */ #endif }; @@ -1213,6 +1361,11 @@ static const struct user_regset native_regsets[] = { .size = sizeof(double), .align = sizeof(double), .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set }, + [REGSET_TM_CVMX] = { + .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX, + .size = sizeof(vector128), .align = sizeof(vector128), + .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set + }, #endif }; @@ -1450,6 +1603,11 @@ static const struct user_regset compat_regsets[] = { .size = sizeof(double), .align = sizeof(double), .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set }, + [REGSET_TM_CVMX] = { + .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX, + .size = sizeof(vector128), .align = sizeof(vector128), + .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set + }, #endif }; -- cgit v1.2.3 From 9d3918f7c0e516bb8782915cdb2f8cbdbf6c4f9b Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:39 +0800 Subject: powerpc/ptrace: Enable support for NT_PPC_TM_CVSX This patch enables support for TM checkpointed VSX register set ELF core note NT_PPC_TM_CVSX based ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls. This is achieved through adding a register set REGSET_TM_CVSX in powerpc corresponding to the ELF core note section added. It implements the get, set and active functions for this new register set added. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/include/uapi/asm/elf.h | 1 + arch/powerpc/kernel/ptrace.c | 129 ++++++++++++++++++++++++++++++++++++ 2 files changed, 130 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h index ecb4e84cb22c..1549172aa258 100644 --- a/arch/powerpc/include/uapi/asm/elf.h +++ b/arch/powerpc/include/uapi/asm/elf.h @@ -92,6 +92,7 @@ #define ELF_NGREG 48 /* includes nip, msr, lr, etc.
*/ #define ELF_NFPREG 33 /* includes fpscr */ #define ELF_NVMX 34 /* includes all vector registers */ +#define ELF_NVSX 32 /* includes all VSX registers */ typedef unsigned long elf_greg_t64; typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG]; diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 6e42137c0175..df6afe7da4da 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -65,6 +65,7 @@ struct pt_regs_offset { #define REG_OFFSET_END {.name = NULL, .offset = 0} #define TVSO(f) (offsetof(struct thread_vr_state, f)) +#define TFSO(f) (offsetof(struct thread_fp_state, f)) static const struct pt_regs_offset regoffset_table[] = { GPR_OFFSET_NAME(0), @@ -1294,6 +1295,123 @@ static int tm_cvmx_set(struct task_struct *target, return ret; } + +/** + * tm_cvsx_active - get active number of registers in CVSX + * @target: The target task. + * @regset: The user regset structure. + * + * This function checks for the active number of available + * registers in the transaction checkpointed VSX category. + */ +static int tm_cvsx_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return 0; + + flush_vsx_to_thread(target); + return target->thread.used_vsr ? regset->n : 0; +} + +/** + * tm_cvsx_get - get CVSX registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy from. + * @ubuf: User buffer to copy into. + * + * This function gets in transaction checkpointed VSX registers. + * + * When the transaction is active 'fp_state' holds the checkpointed + * values for the current transaction to fall back on if it aborts + * in between. This function gets those checkpointed VSX registers. + * The userspace interface buffer layout is as follows. + * + * struct data { + * u64 vsx[32]; + *}; + */ +static int tm_cvsx_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + u64 buf[32]; + int ret, i; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + /* Flush the state */ + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + flush_vsx_to_thread(target); + + for (i = 0; i < 32 ; i++) + buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + buf, 0, 32 * sizeof(double)); + + return ret; +} + +/** + * tm_cvsx_set - set CVSX registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy into. + * @ubuf: User buffer to copy from. + * + * This function sets in transaction checkpointed VSX registers. + * + * When the transaction is active 'fp_state' holds the checkpointed + * VSX register values for the current transaction to fall back on + * if it aborts in between. This function sets these checkpointed + * VSX registers. The userspace interface buffer layout is as follows.
+ * + * struct data { + * u64 vsx[32]; + *}; + */ +static int tm_cvsx_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + u64 buf[32]; + int ret, i; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + /* Flush the state */ + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + flush_vsx_to_thread(target); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + buf, 0, 32 * sizeof(double)); + for (i = 0; i < 32 ; i++) + target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; + + return ret; +} #endif /* @@ -1315,6 +1433,7 @@ enum powerpc_regset { REGSET_TM_CGPR, /* TM checkpointed GPR registers */ REGSET_TM_CFPR, /* TM checkpointed FPR registers */ REGSET_TM_CVMX, /* TM checkpointed VMX registers */ + REGSET_TM_CVSX, /* TM checkpointed VSX registers */ #endif }; @@ -1366,6 +1485,11 @@ static const struct user_regset native_regsets[] = { .size = sizeof(vector128), .align = sizeof(vector128), .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set }, + [REGSET_TM_CVSX] = { + .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX, + .size = sizeof(double), .align = sizeof(double), + .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set + }, #endif }; @@ -1608,6 +1732,11 @@ static const struct user_regset compat_regsets[] = { .size = sizeof(vector128), .align = sizeof(vector128), .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set }, + [REGSET_TM_CVSX] = { + .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX, + .size = sizeof(double), .align = sizeof(double), + .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set + }, #endif }; -- cgit v1.2.3 From 08e1c01d6aedf00af04d9571a0a5d5867298b719 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:40 +0800 Subject: powerpc/ptrace: Enable support for TM SPR state This patch enables support for TM SPR state related ELF core note NT_PPC_TM_SPR based ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls. This is achieved through adding a register set REGSET_TM_SPR in powerpc corresponding to the ELF core note section added. It implements the get, set and active functions for this new register set added. 
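As a rough illustration (not part of this patch), userspace could read these SPRs from a stopped tracee as sketched below, assuming UAPI headers new enough to define NT_PPC_TM_SPR:

    #include <elf.h>            /* NT_PPC_TM_SPR, if the installed headers carry it */
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Mirrors the regset layout documented above: tfhar, texasr, tfiar. */
    struct tm_spr_regs {
            uint64_t tm_tfhar;
            uint64_t tm_texasr;
            uint64_t tm_tfiar;
    };

    static long read_tm_sprs(pid_t pid, struct tm_spr_regs *r)
    {
            struct iovec iov = { .iov_base = r, .iov_len = sizeof(*r) };

            /* Fails with ENODEV when the CPU lacks the TM feature. */
            return ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_SPR, &iov);
    }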
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/include/uapi/asm/elf.h | 1 + arch/powerpc/kernel/ptrace.c | 143 +++++++++++++++++++++++++++++++++++- 2 files changed, 143 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h index 1549172aa258..e703c646163b 100644 --- a/arch/powerpc/include/uapi/asm/elf.h +++ b/arch/powerpc/include/uapi/asm/elf.h @@ -93,6 +93,7 @@ #define ELF_NFPREG 33 /* includes fpscr */ #define ELF_NVMX 34 /* includes all vector registers */ #define ELF_NVSX 32 /* includes all VSX registers */ +#define ELF_NTMSPRREG 3 /* include tfhar, tfiar, texasr */ typedef unsigned long elf_greg_t64; typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG]; diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index df6afe7da4da..f654224d2653 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -66,6 +66,7 @@ struct pt_regs_offset { #define TVSO(f) (offsetof(struct thread_vr_state, f)) #define TFSO(f) (offsetof(struct thread_fp_state, f)) +#define TSO(f) (offsetof(struct thread_struct, f)) static const struct pt_regs_offset regoffset_table[] = { GPR_OFFSET_NAME(0), @@ -1412,7 +1413,136 @@ static int tm_cvsx_set(struct task_struct *target, return ret; } -#endif + +/** + * tm_spr_active - get active number of registers in TM SPR + * @target: The target task. + * @regset: The user regset structure. + * + * This function checks the active number of available + * registers in the transactional memory SPR category. + */ +static int tm_spr_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + return regset->n; +} + +/** + * tm_spr_get - get the TM related SPR registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy from. + * @ubuf: User buffer to copy into. + * + * This function gets transactional memory related SPR registers. + * The userspace interface buffer layout is as follows. + * + * struct { + * u64 tm_tfhar; + * u64 tm_texasr; + * u64 tm_tfiar; + * }; + */ +static int tm_spr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + /* Build tests */ + BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr)); + BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar)); + BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs)); + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + /* Flush the states */ + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + + /* TFHAR register */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_tfhar, 0, sizeof(u64)); + + /* TEXASR register */ + if (!ret) + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_texasr, sizeof(u64), + 2 * sizeof(u64)); + + /* TFIAR register */ + if (!ret) + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_tfiar, + 2 * sizeof(u64), 3 * sizeof(u64)); + return ret; +} + +/** + * tm_spr_set - set the TM related SPR registers + * @target: The target task. + * @regset: The user regset structure. + * @pos: The buffer position. + * @count: Number of bytes to copy. + * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from. + * + * This function sets transactional memory related SPR registers. + * The userspace interface buffer layout is as follows. + * + * struct { + * u64 tm_tfhar; + * u64 tm_texasr; + * u64 tm_tfiar; + * }; + */ +static int tm_spr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + /* Build tests */ + BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr)); + BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar)); + BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs)); + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + /* Flush the states */ + flush_fp_to_thread(target); + flush_altivec_to_thread(target); + flush_tmregs_to_thread(target); + + /* TFHAR register */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_tfhar, 0, sizeof(u64)); + + /* TEXASR register */ + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_texasr, sizeof(u64), + 2 * sizeof(u64)); + + /* TFIAR register */ + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_tfiar, + 2 * sizeof(u64), 3 * sizeof(u64)); + return ret; +} +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ /* * These are our native regset flavors. @@ -1434,6 +1564,7 @@ enum powerpc_regset { REGSET_TM_CFPR, /* TM checkpointed FPR registers */ REGSET_TM_CVMX, /* TM checkpointed VMX registers */ REGSET_TM_CVSX, /* TM checkpointed VSX registers */ + REGSET_TM_SPR, /* TM specific SPR registers */ #endif }; @@ -1490,6 +1621,11 @@ static const struct user_regset native_regsets[] = { .size = sizeof(double), .align = sizeof(double), .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set }, + [REGSET_TM_SPR] = { + .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG, + .size = sizeof(u64), .align = sizeof(u64), + .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set + }, #endif }; @@ -1737,6 +1873,11 @@ static const struct user_regset compat_regsets[] = { .size = sizeof(double), .align = sizeof(double), .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set }, + [REGSET_TM_SPR] = { + .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG, + .size = sizeof(u64), .align = sizeof(u64), + .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set + }, #endif }; -- cgit v1.2.3 From c45dc9003a0722fdebf603cb63033046a70d24cd Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:41 +0800 Subject: powerpc/ptrace: Enable NT_PPC_TM_CTAR, NT_PPC_TM_CPPR, NT_PPC_TM_CDSCR This patch enables support for all three TM checkpointed SPR states related ELF core notes NT_PPC_TM_CTAR, NT_PPC_TM_CPPR, NT_PPC_TM_CDSCR based ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls. This is achieved through adding three new register sets REGSET_TM_CTAR, REGSET_TM_CPPR and REGSET_TM_CDSCR in powerpc corresponding to the ELF core note sections added. It implements the get, set and active functions for all these new register sets added.
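For illustration only (assuming <elf.h> exposes the new note numbers), all three regsets can be fetched with the same single-u64 pattern; the kernel returns -ENODATA when no transaction is active:

    #include <elf.h>            /* NT_PPC_TM_CTAR / NT_PPC_TM_CPPR / NT_PPC_TM_CDSCR */
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Each of these regsets is a single u64. */
    static long read_ckpt_spr(pid_t pid, int note_type, uint64_t *val)
    {
            struct iovec iov = { .iov_base = val, .iov_len = sizeof(*val) };

            return ptrace(PTRACE_GETREGSET, pid, note_type, &iov);
    }

    /* e.g.: uint64_t tar; read_ckpt_spr(pid, NT_PPC_TM_CTAR, &tar); */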
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ptrace.c | 178 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 178 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index f654224d2653..a81e2d7cbdb9 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1542,6 +1542,151 @@ static int tm_spr_set(struct task_struct *target, 2 * sizeof(u64), 3 * sizeof(u64)); return ret; } + +static int tm_tar_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (MSR_TM_ACTIVE(target->thread.regs->msr)) + return regset->n; + + return 0; +} + +static int tm_tar_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_tar, 0, sizeof(u64)); + return ret; +} + +static int tm_tar_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_tar, 0, sizeof(u64)); + return ret; +} + +static int tm_ppr_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (MSR_TM_ACTIVE(target->thread.regs->msr)) + return regset->n; + + return 0; +} + + +static int tm_ppr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_ppr, 0, sizeof(u64)); + return ret; +} + +static int tm_ppr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_ppr, 0, sizeof(u64)); + return ret; +} + +static int tm_dscr_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (MSR_TM_ACTIVE(target->thread.regs->msr)) + return regset->n; + + return 0; +} + +static int tm_dscr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_dscr, 0, sizeof(u64)); + return ret; +} + +static int tm_dscr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + 
if (!cpu_has_feature(CPU_FTR_TM)) + return -ENODEV; + + if (!MSR_TM_ACTIVE(target->thread.regs->msr)) + return -ENODATA; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.tm_dscr, 0, sizeof(u64)); + return ret; +} #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ /* @@ -1565,6 +1710,9 @@ enum powerpc_regset { REGSET_TM_CVMX, /* TM checkpointed VMX registers */ REGSET_TM_CVSX, /* TM checkpointed VSX registers */ REGSET_TM_SPR, /* TM specific SPR registers */ + REGSET_TM_CTAR, /* TM checkpointed TAR register */ + REGSET_TM_CPPR, /* TM checkpointed PPR register */ + REGSET_TM_CDSCR, /* TM checkpointed DSCR register */ #endif }; @@ -1626,6 +1774,21 @@ static const struct user_regset native_regsets[] = { .size = sizeof(u64), .align = sizeof(u64), .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set }, + [REGSET_TM_CTAR] = { + .core_note_type = NT_PPC_TM_CTAR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set + }, + [REGSET_TM_CPPR] = { + .core_note_type = NT_PPC_TM_CPPR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set + }, + [REGSET_TM_CDSCR] = { + .core_note_type = NT_PPC_TM_CDSCR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set + }, #endif }; @@ -1878,6 +2041,21 @@ static const struct user_regset compat_regsets[] = { .size = sizeof(u64), .align = sizeof(u64), .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set }, + [REGSET_TM_CTAR] = { + .core_note_type = NT_PPC_TM_CTAR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set + }, + [REGSET_TM_CPPR] = { + .core_note_type = NT_PPC_TM_CPPR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set + }, + [REGSET_TM_CDSCR] = { + .core_note_type = NT_PPC_TM_CDSCR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set + }, #endif }; -- cgit v1.2.3 From fa439810cc1b3c927ec24ede17d02467e1b143a1 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:42 +0800 Subject: powerpc/ptrace: Enable support for NT_PPC_TAR, NT_PPC_PPR, NT_PPC_DSCR This patch enables support for TAR, PPR, DSCR registers related ELF core notes NT_PPC_TAR, NT_PPC_PPR, NT_PPC_DSCR based ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls. This is achieved through adding three new register sets REGSET_TAR, REGSET_PPR, REGSET_DSCR in powerpc corresponding to the ELF core note sections added in this regard. It implements the get, set and active functions for all these new register sets added.
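A sketch of the corresponding userspace side (hypothetical helper, assuming headers that define NT_PPC_DSCR), this time in the PTRACE_SETREGSET direction:

    #include <elf.h>            /* NT_PPC_TAR / NT_PPC_PPR / NT_PPC_DSCR */
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Write a new DSCR value into a stopped tracee. */
    static long set_dscr(pid_t pid, uint64_t dscr)
    {
            struct iovec iov = { .iov_base = &dscr, .iov_len = sizeof(dscr) };

            return ptrace(PTRACE_SETREGSET, pid, NT_PPC_DSCR, &iov);
    }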
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ptrace.c | 117 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 117 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index a81e2d7cbdb9..a1e166ae4b14 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1689,6 +1689,78 @@ static int tm_dscr_set(struct task_struct *target, } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ +#ifdef CONFIG_PPC64 +static int ppr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.ppr, 0, sizeof(u64)); + return ret; +} + +static int ppr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.ppr, 0, sizeof(u64)); + return ret; +} + +static int dscr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.dscr, 0, sizeof(u64)); + return ret; +} +static int dscr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.dscr, 0, sizeof(u64)); + return ret; +} +#endif +#ifdef CONFIG_PPC_BOOK3S_64 +static int tar_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.tar, 0, sizeof(u64)); + return ret; +} +static int tar_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.tar, 0, sizeof(u64)); + return ret; +} +#endif /* * These are our native regset flavors. 
*/ @@ -1714,6 +1786,13 @@ enum powerpc_regset { REGSET_TM_CPPR, /* TM checkpointed PPR register */ REGSET_TM_CDSCR, /* TM checkpointed DSCR register */ #endif +#ifdef CONFIG_PPC64 + REGSET_PPR, /* PPR register */ + REGSET_DSCR, /* DSCR register */ +#endif +#ifdef CONFIG_PPC_BOOK3S_64 + REGSET_TAR, /* TAR register */ +#endif }; static const struct user_regset native_regsets[] = { @@ -1790,6 +1869,25 @@ static const struct user_regset native_regsets[] = { .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set }, #endif +#ifdef CONFIG_PPC64 + [REGSET_PPR] = { + .core_note_type = NT_PPC_PPR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .get = ppr_get, .set = ppr_set + }, + [REGSET_DSCR] = { + .core_note_type = NT_PPC_DSCR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .get = dscr_get, .set = dscr_set + }, +#endif +#ifdef CONFIG_PPC_BOOK3S_64 + [REGSET_TAR] = { + .core_note_type = NT_PPC_TAR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .get = tar_get, .set = tar_set + }, +#endif }; static const struct user_regset_view user_ppc_native_view = { @@ -2057,6 +2155,25 @@ static const struct user_regset compat_regsets[] = { .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set }, #endif +#ifdef CONFIG_PPC64 + [REGSET_PPR] = { + .core_note_type = NT_PPC_PPR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .get = ppr_get, .set = ppr_set + }, + [REGSET_DSCR] = { + .core_note_type = NT_PPC_DSCR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .get = dscr_get, .set = dscr_set + }, +#endif +#ifdef CONFIG_PPC_BOOK3S_64 + [REGSET_TAR] = { + .core_note_type = NT_PPC_TAR, .n = 1, + .size = sizeof(u64), .align = sizeof(u64), + .get = tar_get, .set = tar_set + }, +#endif }; static const struct user_regset_view user_ppc_compat_view = { -- cgit v1.2.3 From cf89d4e1b181bda27a5d52f4afd239ea07e84eb0 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:43 +0800 Subject: powerpc/ptrace: Enable support for EBB registers This patch enables support for EBB state registers related ELF core note NT_PPC_EBB based ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls. This is achieved through adding one new register set REGSET_EBB in powerpc corresponding to the ELF core note section added in this regard. It also implements the get, set and active functions for this new register set added.
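To sketch the intended use (assumed names from <elf.h> and the three-register layout described above; not part of the patch), note that the regset reports -ENODATA until the target has actually used EBB:

    #include <elf.h>            /* NT_PPC_EBB */
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Mirrors the regset layout: ebbrr, ebbhr, bescr. */
    struct ebb_regs {
            uint64_t ebbrr;
            uint64_t ebbhr;
            uint64_t bescr;
    };

    static long read_ebb(pid_t pid, struct ebb_regs *ebb)
    {
            struct iovec iov = { .iov_base = ebb, .iov_len = sizeof(*ebb) };

            /* errno == ENODATA: tracee never used EBB; ENODEV: no ARCH_207S. */
            return ptrace(PTRACE_GETREGSET, pid, NT_PPC_EBB, &iov);
    }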
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/include/uapi/asm/elf.h | 1 + arch/powerpc/kernel/ptrace.c | 75 +++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h index e703c646163b..8c4d71a59259 100644 --- a/arch/powerpc/include/uapi/asm/elf.h +++ b/arch/powerpc/include/uapi/asm/elf.h @@ -94,6 +94,7 @@ #define ELF_NVMX 34 /* includes all vector registers */ #define ELF_NVSX 32 /* includes all VSX registers */ #define ELF_NTMSPRREG 3 /* include tfhar, tfiar, texasr */ +#define ELF_NEBB 3 /* includes ebbrr, ebbhr, bescr */ typedef unsigned long elf_greg_t64; typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG]; diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index a1e166ae4b14..7942f9c575ef 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1760,6 +1760,70 @@ static int tar_set(struct task_struct *target, &target->thread.tar, 0, sizeof(u64)); return ret; } + +static int ebb_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return -ENODEV; + + if (target->thread.used_ebb) + return regset->n; + + return 0; +} + +static int ebb_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + /* Build tests */ + BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr)); + BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr)); + + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return -ENODEV; + + if (!target->thread.used_ebb) + return -ENODATA; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.ebbrr, 0, 3 * sizeof(unsigned long)); +} + +static int ebb_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret = 0; + + /* Build tests */ + BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr)); + BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr)); + + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return -ENODEV; + + if (target->thread.used_ebb) + return -ENODATA; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.ebbrr, 0, sizeof(unsigned long)); + + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.ebbhr, sizeof(unsigned long), + 2 * sizeof(unsigned long)); + + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.bescr, + 2 * sizeof(unsigned long), 3 * sizeof(unsigned long)); + + return ret; +} #endif /* * These are our native regset flavors. 
@@ -1792,6 +1856,7 @@ enum powerpc_regset { #endif #ifdef CONFIG_PPC_BOOK3S_64 REGSET_TAR, /* TAR register */ + REGSET_EBB, /* EBB registers */ #endif }; @@ -1887,6 +1952,11 @@ static const struct user_regset native_regsets[] = { .size = sizeof(u64), .align = sizeof(u64), .get = tar_get, .set = tar_set }, + [REGSET_EBB] = { + .core_note_type = NT_PPC_EBB, .n = ELF_NEBB, + .size = sizeof(u64), .align = sizeof(u64), + .active = ebb_active, .get = ebb_get, .set = ebb_set + }, #endif }; @@ -2173,6 +2243,11 @@ static const struct user_regset compat_regsets[] = { .size = sizeof(u64), .align = sizeof(u64), .get = tar_get, .set = tar_set }, + [REGSET_EBB] = { + .core_note_type = NT_PPC_EBB, .n = ELF_NEBB, + .size = sizeof(u64), .align = sizeof(u64), + .active = ebb_active, .get = ebb_get, .set = ebb_set + }, #endif }; -- cgit v1.2.3 From a67ae75802f178b0b790f1cd7f9c2954a85707fa Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 28 Jul 2016 10:57:44 +0800 Subject: powerpc/ptrace: Enable support for Performance Monitor registers This patch enables support for Performance monitor registers related ELF core note NT_PPC_PMU based ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls. This is achieved through adding one new register set REGSET_PMR in powerpc corresponding to the ELF core note section added in this regard. It also implements the get, set and active functions for this new register set added. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- arch/powerpc/include/uapi/asm/elf.h | 3 +- arch/powerpc/kernel/ptrace.c | 75 +++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h index 8c4d71a59259..3a9e44c45c78 100644 --- a/arch/powerpc/include/uapi/asm/elf.h +++ b/arch/powerpc/include/uapi/asm/elf.h @@ -94,7 +94,8 @@ #define ELF_NVMX 34 /* includes all vector registers */ #define ELF_NVSX 32 /* includes all VSX registers */ #define ELF_NTMSPRREG 3 /* include tfhar, tfiar, texasr */ -#define ELF_NEBB 3 /* includes ebbrr, ebbhr, bescr */ +#define ELF_NEBB 3 /* includes ebbrr, ebbhr, bescr */ +#define ELF_NPMU 5 /* includes siar, sdar, sier, mmcr2, mmcr0 */ typedef unsigned long elf_greg_t64; typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG]; diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 7942f9c575ef..4f3c5756cc09 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1824,6 +1824,75 @@ static int ebb_set(struct task_struct *target, return ret; } +static int pmu_active(struct task_struct *target, + const struct user_regset *regset) +{ + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return -ENODEV; + + return regset->n; +} + +static int pmu_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + /* Build tests */ + BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar)); + BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier)); + BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2)); + BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0)); + + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return -ENODEV; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.siar, 0, + 5 * sizeof(unsigned long)); +} + +static int pmu_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, 
unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret = 0; + + /* Build tests */ + BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar)); + BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier)); + BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2)); + BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0)); + + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return -ENODEV; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.siar, 0, + sizeof(unsigned long)); + + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.sdar, sizeof(unsigned long), + 2 * sizeof(unsigned long)); + + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.sier, 2 * sizeof(unsigned long), + 3 * sizeof(unsigned long)); + + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.mmcr2, 3 * sizeof(unsigned long), + 4 * sizeof(unsigned long)); + + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.mmcr0, 4 * sizeof(unsigned long), + 5 * sizeof(unsigned long)); + return ret; +} #endif /* * These are our native regset flavors. @@ -1857,6 +1926,7 @@ enum powerpc_regset { #ifdef CONFIG_PPC_BOOK3S_64 REGSET_TAR, /* TAR register */ REGSET_EBB, /* EBB registers */ + REGSET_PMR, /* Performance Monitor Registers */ #endif }; @@ -1957,6 +2027,11 @@ static const struct user_regset native_regsets[] = { .size = sizeof(u64), .align = sizeof(u64), .active = ebb_active, .get = ebb_get, .set = ebb_set }, + [REGSET_PMR] = { + .core_note_type = NT_PPC_PMU, .n = ELF_NPMU, + .size = sizeof(u64), .align = sizeof(u64), + .active = pmu_active, .get = pmu_get, .set = pmu_set + }, #endif }; -- cgit v1.2.3 From 2c0f99516f53911c3f2f81ab3815841e3408f11e Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 2 Aug 2016 15:53:01 +1000 Subject: powerpc/32: Fix early access to cpu_spec relocation Commit 9402c6846131 ("powerpc: Factor do_feature_fixup calls") introduced a subtle bug on 32-bit. When reading the cpu spec from the global, we not only need to do a pointer relocation on the global address but also on the pointer we read from it. This fixes crashes reported on MPC5200 based machines. Fixes: 9402c6846131 ("powerpc: Factor do_feature_fixup calls") Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman --- arch/powerpc/lib/feature-fixups.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 077fa0ce3382..74145f02ad41 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -161,7 +161,7 @@ static unsigned long __initdata saved_firmware_features; void __init apply_feature_fixups(void) { - struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec); + struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec)); *PTRRELOC(&saved_cpu_features) = spec->cpu_features; *PTRRELOC(&saved_mmu_features) = spec->mmu_features; -- cgit v1.2.3 From 1a058f164348a71229afd35bb5bbbb0fb514555d Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Mon, 1 Aug 2016 00:58:39 +0530 Subject: powerpc/perf: Fix incorrect event codes in power9-event-list These have been changed in the hardware, update Linux's version. 
Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/perf/power9-events-list.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h index cda6fcb809ca..6447dc1c3d89 100644 --- a/arch/powerpc/perf/power9-events-list.h +++ b/arch/powerpc/perf/power9-events-list.h @@ -34,15 +34,15 @@ EVENT(PM_L1_ICACHE_MISS, 0x200fd) /* Instruction Demand sectors written into IL1 */ EVENT(PM_L1_DEMAND_WRITE, 0x0408c) /* Instruction prefetch written into IL1 */ -EVENT(PM_IC_PREF_WRITE, 0x0408e) +EVENT(PM_IC_PREF_WRITE, 0x0488c) /* The data cache was reloaded from local core's L3 due to a demand load */ EVENT(PM_DATA_FROM_L3, 0x4c042) /* Demand LD - L3 Miss (not L2 hit and not L3 hit) */ EVENT(PM_DATA_FROM_L3MISS, 0x300fe) /* All successful D-side store dispatches for this thread */ -EVENT(PM_L2_ST, 0x16081) +EVENT(PM_L2_ST, 0x16880) /* All successful D-side store dispatches for this thread that were L2 Miss */ -EVENT(PM_L2_ST_MISS, 0x26081) +EVENT(PM_L2_ST_MISS, 0x26880) /* Total HW L3 prefetches(Load+store) */ EVENT(PM_L3_PREF_ALL, 0x4e052) /* Data PTEG reload */ -- cgit v1.2.3 From eea8148c69f3aecbf297b12943a591467a1fb432 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 4 Aug 2016 15:32:06 +1000 Subject: powerpc/mm: Move register_process_table() out of ppc_md We want to initialise register_process_table() before ppc_md is set up, so that it can be called as part of MMU init (at least on Radix ATM). That no longer works because probe_machine() requires that ppc_md be empty before it's called, and we now do probe_machine() much later. So make register_process_table a global for now. It will probably move into a mmu_radix_ops struct at some point in the future. This was broken by me when applying commit 7025776ed1eb "powerpc/mm: Move hash table ops to a separate structure" due to conflicts with other patches.
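Reduced to a sketch, the resulting indirection looks like this (all names taken from the diff below; shown out of context purely as illustration):

    /* One global hook, defined once in pgtable-book3s64.c: */
    int (*register_process_table)(unsigned long base, unsigned long page_size,
                                  unsigned long tbl_size);

    /* Assigned by whichever backend is initialised: */
    register_process_table = native_register_process_table; /* radix_init_native() */
    register_process_table = native_register_proc_table;    /* hpte_init_native(), ARCH_300 */

    /* And callable from early radix MMU init, before ppc_md is populated: */
    register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);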
Fixes: 7025776ed1eb ("powerpc/mm: Move hash table ops to a separate structure") Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu.h | 4 ++++ arch/powerpc/include/asm/machdep.h | 2 -- arch/powerpc/mm/hash_native_64.c | 2 +- arch/powerpc/mm/pgtable-book3s64.c | 3 +++ arch/powerpc/mm/pgtable-radix.c | 4 ++-- 5 files changed, 10 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 0cbde6a6750b..8afb0e00f7d9 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -134,5 +134,9 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, return hash__setup_initial_memory_limit(first_memblock_base, first_memblock_size); } + +extern int (*register_process_table)(unsigned long base, unsigned long page_size, + unsigned long tbl_size); + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 76f5398e7152..0420b388dd83 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -219,8 +219,6 @@ struct machdep_calls { #ifdef CONFIG_ARCH_RANDOM int (*get_random_seed)(unsigned long *v); #endif - int (*register_process_table)(unsigned long base, unsigned long page_size, - unsigned long tbl_size); }; extern void e500_idle(void); diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index d2d8efd79cbf..0e4e9654bd2c 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -747,5 +747,5 @@ void __init hpte_init_native(void) mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate; if (cpu_has_feature(CPU_FTR_ARCH_300)) - ppc_md.register_process_table = native_register_proc_table; + register_process_table = native_register_proc_table; } diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 7bb8acffe876..34079302cc17 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -14,6 +14,9 @@ #include "mmu_decl.h" #include +int (*register_process_table)(unsigned long base, unsigned long page_size, + unsigned long tbl_size); + #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * This is called when relaxing access to a hugepage. It's also called in the page diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index f34ccdbe0fbd..af897d91d09f 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -171,7 +171,7 @@ redo: * of process table here. But our linear mapping also enable us to use * physical address here. */ - ppc_md.register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); + register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd); } @@ -198,7 +198,7 @@ static void __init radix_init_partition_table(void) void __init radix_init_native(void) { - ppc_md.register_process_table = native_register_process_table; + register_process_table = native_register_process_table; } static int __init get_idx_from_shift(unsigned int shift) -- cgit v1.2.3