From 8aa989b8fba1428b50a1be771c01285f1de0227b Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 27 Jan 2015 16:48:03 +1100
Subject: powerpc: Remove some unused functions

Remove slice_set_psize() which is not used. It was added in 3a8247cc2c85
"powerpc: Only demote individual slices rather than whole process" but
was never used.

Remove vsx_assist_exception() which is not used. It was added in
ce48b2100785 "powerpc: Add VSX context save/restore, ptrace and signal
support" but was never used.

Remove generic_mach_cpu_die() which is not used. Its last caller was
removed in 375f561a4131 "powerpc/powernv: Always go into nap mode when
CPU is offline".

Remove mpc7448_hpc2_power_off() and mpc7448_hpc2_halt() which are
unused. These were introduced in c5d56332fd6c "[POWERPC] Add general
support for mpc7448hpc2 (Taiga) platform" but were never used.

This was partially found by using a static code analysis program called
cppcheck.

Signed-off-by: Rickard Strandqvist
[mpe: Update changelog with details on when/why they are unused]
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/slice.c | 29 -----------------------------
 1 file changed, 29 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index ded0ea1afde4..0f432a702870 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -645,35 +645,6 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
 	spin_unlock_irqrestore(&slice_convert_lock, flags);
 }
 
-void slice_set_psize(struct mm_struct *mm, unsigned long address,
-		     unsigned int psize)
-{
-	unsigned char *hpsizes;
-	unsigned long i, flags;
-	u64 *lpsizes;
-
-	spin_lock_irqsave(&slice_convert_lock, flags);
-	if (address < SLICE_LOW_TOP) {
-		i = GET_LOW_SLICE_INDEX(address);
-		lpsizes = &mm->context.low_slices_psize;
-		*lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
-			((unsigned long) psize << (i * 4));
-	} else {
-		int index, mask_index;
-		i = GET_HIGH_SLICE_INDEX(address);
-		hpsizes = mm->context.high_slices_psize;
-		mask_index = i & 0x1;
-		index = i >> 1;
-		hpsizes[index] = (hpsizes[index] &
-				  ~(0xf << (mask_index * 4))) |
-			(((unsigned long)psize) << (mask_index * 4));
-	}
-
-	spin_unlock_irqrestore(&slice_convert_lock, flags);
-
-	copro_flush_all_slbs(mm);
-}
-
 void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
 			   unsigned long len, unsigned int psize)
 {
-- 
cgit v1.2.3

From 238cac16c03eef00bcb607e09defee79dadca958 Mon Sep 17 00:00:00 2001
From: Emil Medve
Date: Wed, 21 Jan 2015 16:22:52 -0600
Subject: powerpc: Remove duplicate tlbcam_index declarations

They seem to be leftovers from '14cf11a powerpc: Merge enough to start
building in arch/powerpc'

Signed-off-by: Emil Medve
Signed-off-by: Scott Wood
---
 arch/powerpc/mm/fsl_booke_mmu.c | 2 --
 arch/powerpc/mm/pgtable_32.c    | 1 -
 2 files changed, 3 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 94cd728166d3..b46912fee7cd 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -67,8 +67,6 @@ struct tlbcamrange {
 	phys_addr_t phys;
 } tlbcam_addrs[NUM_TLBCAMS];
 
-extern unsigned int tlbcam_index;
-
 unsigned long tlbcam_sz(int idx)
 {
 	return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 50fad3801f30..6eec88685a71 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -63,7 +63,6 @@ void setbat(int index, unsigned long virt, phys_addr_t phys,
 #endif /* HAVE_BATS */
 
 #ifdef HAVE_TLBCAM
-extern unsigned int tlbcam_index;
 extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
 extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
 #else /* !HAVE_TLBCAM */
-- 
cgit v1.2.3

From a7b9f671f2d141528491c346e21e8a179cee9d21 Mon Sep 17 00:00:00 2001
From: LEROY Christophe
Date: Mon, 19 Jan 2015 17:04:38 +0100
Subject: powerpc32: adds handling of _PAGE_RO

Some powerpc variants, like the 8xx, don't have an RW bit in their PTE
bits, but an RO (Read Only) bit instead. This patch implements the
handling of a _PAGE_RO flag, to be used in place of _PAGE_RW.

Signed-off-by: Christophe Leroy
[scottwood@freescale.com: fix whitespace]
Signed-off-by: Scott Wood
---
 arch/powerpc/include/asm/pgtable-ppc32.h |  8 +++++---
 arch/powerpc/include/asm/pgtable.h       |  7 +++++--
 arch/powerpc/include/asm/pte-common.h    | 25 +++++++++++++++++--------
 arch/powerpc/mm/pgtable_32.c             |  2 +-
 4 files changed, 28 insertions(+), 14 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 234e07c47803..62a3e49a9a14 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -275,7 +275,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
 {
-	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
+	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
 }
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
@@ -286,9 +286,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 
 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
-	unsigned long bits = pte_val(entry) &
+	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-	pte_update(ptep, 0, bits);
+	unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+	pte_update(ptep, clr, set);
 }
 
 #define __HAVE_ARCH_PTE_SAME
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index a8805fee0df9..7e77f2ca5132 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -30,7 +30,8 @@ struct mm_struct;
 #include <asm/tlbflush.h>
 
 /* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
+static inline int pte_write(pte_t pte)
+{	return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; }
 static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
@@ -115,12 +116,14 @@ static inline unsigned long pte_pfn(pte_t pte)	{
 
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE);
+	pte_val(pte) |= _PAGE_RO; return pte; }
 static inline pte_t pte_mkclean(pte_t pte) {
 	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
 static inline pte_t pte_mkold(pte_t pte) {
 	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte) {
+	pte_val(pte) &= ~_PAGE_RO;
 	pte_val(pte) |= _PAGE_RW; return pte; }
 static inline pte_t pte_mkdirty(pte_t pte) {
 	pte_val(pte) |= _PAGE_DIRTY; return pte; }
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index e040c3595129..2aef9b7a0eb2 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -34,6 +34,12 @@
 #ifndef _PAGE_PSIZE
 #define _PAGE_PSIZE		0
 #endif
+/* _PAGE_RO and _PAGE_RW shall not be defined at the same time */
+#ifndef _PAGE_RO
+#define _PAGE_RO 0
+#else
+#define _PAGE_RW 0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
 #endif
@@ -42,10 +48,10 @@
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif
 #ifndef _PAGE_KERNEL_RO
-#define _PAGE_KERNEL_RO		0
+#define _PAGE_KERNEL_RO		(_PAGE_RO)
 #endif
 #ifndef _PAGE_KERNEL_ROX
-#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
+#define _PAGE_KERNEL_ROX	(_PAGE_EXEC | _PAGE_RO)
 #endif
 #ifndef _PAGE_KERNEL_RW
 #define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
@@ -95,7 +101,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 /* Mask of bits returned by pte_pgprot() */
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
-			 _PAGE_USER | _PAGE_ACCESSED | \
+			 _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | \
			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
 
 #ifdef CONFIG_NUMA_BALANCING
@@ -128,11 +134,14 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  */
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
+				 _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+				 _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+				 _PAGE_EXEC)
 
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6eec88685a71..833139620431 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -146,7 +146,7 @@ void __iomem *
 ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
 	/* writeable implies dirty for kernel addresses */
-	if (flags & _PAGE_RW)
+	if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
 		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
 
 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
-- 
cgit v1.2.3
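An aside on the accessor trick this patch relies on: because pte-common.h
forces whichever of _PAGE_RO/_PAGE_RW a platform does not define to 0, one
mask-and-compare expression serves both conventions. Below is a minimal
userspace sketch of that idea; the bit values are illustrative stand-ins,
not the real 8xx PTE layout.

	#include <stdio.h>

	/* Illustrative bit assignments -- NOT the real powerpc PTE layout.
	 * Pick one convention; the header forces the other macro to 0. */
	#define _PAGE_RO 0x0001	/* 8xx-style: read-only bit, no RW bit */
	#define _PAGE_RW 0

	/* Writable iff the RW bit is set (RW convention) or the RO bit is
	 * clear (RO convention). One expression covers both cases, because
	 * the unused macro is 0. */
	static int pte_write(unsigned long pte_val)
	{
		return (pte_val & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
	}

	int main(void)
	{
		printf("RO set   -> writable? %d\n", pte_write(_PAGE_RO)); /* 0 */
		printf("RO clear -> writable? %d\n", pte_write(0));        /* 1 */
		return 0;
	}

With the RW convention (_PAGE_RO == 0) the same expression degenerates to
the familiar (pte_val & _PAGE_RW) != 0.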
From debddd95ec0bb8ac1bbe719b5e4588e453c7b2fc Mon Sep 17 00:00:00 2001
From: LEROY Christophe
Date: Mon, 19 Jan 2015 16:44:42 +0100
Subject: powerpc/8xx: reduce pressure on TLB due to context switches

For nohash powerpc, when we run out of contexts, contexts are freed by
stealing used contexts in turn. When a victim has been selected, the
associated TLB entries are freed using _tlbil_pid().

Unfortunately, on the PPC 8xx _tlbil_pid() does a tlbia, and hence
flushes ALL TLB entries, not only the ones linked to the stolen
context. Therefore, as implemented today, at each task switch requiring
a new context, all TLB entries are flushed.

This patch modifies the implementation so that when we run out of
contexts, all contexts are freed at once, hence dividing the number of
calls to tlbia by 16.

Signed-off-by: Christophe Leroy
Signed-off-by: Scott Wood
---
 arch/powerpc/mm/mmu_context_nohash.c | 43 +++++++++++++++++++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 9cba6cba2e50..986afbc22c76 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -52,12 +52,15 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
+#include "mmu_decl.h"
+
 static unsigned int first_context, last_context;
 static unsigned int next_context, nr_free_contexts;
 static unsigned long *context_map;
 static unsigned long *stale_map[NR_CPUS];
 static struct mm_struct **context_mm;
 static DEFINE_RAW_SPINLOCK(context_lock);
+static bool no_selective_tlbil;
 
 #define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -133,6 +136,38 @@ static unsigned int steal_context_smp(unsigned int id)
 }
 #endif  /* CONFIG_SMP */
 
+static unsigned int steal_all_contexts(void)
+{
+	struct mm_struct *mm;
+	int cpu = smp_processor_id();
+	unsigned int id;
+
+	for (id = first_context; id <= last_context; id++) {
+		/* Pick up the victim mm */
+		mm = context_mm[id];
+
+		pr_hardcont(" | steal %d from 0x%p", id, mm);
+
+		/* Mark this mm as having no context anymore */
+		mm->context.id = MMU_NO_CONTEXT;
+		if (id != first_context) {
+			context_mm[id] = NULL;
+			__clear_bit(id, context_map);
+#ifdef DEBUG_MAP_CONSISTENCY
+			mm->context.active = 0;
+#endif
+		}
+		__clear_bit(id, stale_map[cpu]);
+	}
+
+	/* Flush the TLB for all contexts (not to be used on SMP) */
+	_tlbil_all();
+
+	nr_free_contexts = last_context - first_context;
+
+	return first_context;
+}
+
 /* Note that this will also be called on SMP if all other CPUs are
  * offlined, which means that it may be called for cpu != 0. For
  * this to work, we somewhat assume that CPUs that are onlined
@@ -241,7 +276,10 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
			goto stolen;
		}
 #endif /* CONFIG_SMP */
-		id = steal_context_up(id);
+		if (no_selective_tlbil)
+			id = steal_all_contexts();
+		else
+			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;
@@ -407,12 +445,15 @@ void __init mmu_context_init(void)
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
+		no_selective_tlbil = true;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
+		no_selective_tlbil = false;
	} else {
		first_context = 1;
		last_context = 255;
+		no_selective_tlbil = false;
	}
 
 #ifdef DEBUG_CLAMP_LAST_CONTEXT
-- 
cgit v1.2.3
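To see where "dividing the number of calls to tlbia by 16" comes from: the
8xx has 16 contexts, and previously every steal cost one full flush, whereas
steal_all_contexts() amortizes a single _tlbil_all() across the 16
allocations it enables. The following toy model compares the two policies
under the simplifying assumption that the context map is full on every
allocation; the numbers are illustrative, not a kernel benchmark.

	#include <stdio.h>

	#define NR_CTX 16	/* contexts available on the 8xx */

	int main(void)
	{
		unsigned long requests = 1000;	/* allocations with a full map */
		unsigned long one_at_a_time, all_at_once = 0;
		unsigned long free_ctx = 0;

		/* Old behaviour: each steal triggers _tlbil_pid(), which is
		 * a full tlbia on the 8xx -- one flush per request. */
		one_at_a_time = requests;

		/* New behaviour: when the pool is empty, free all contexts
		 * with a single flush, then serve the following requests
		 * from the freed pool. */
		for (unsigned long i = 0; i < requests; i++) {
			if (free_ctx == 0) {
				all_at_once++;		/* one flush... */
				free_ctx = NR_CTX;	/* ...frees 16 contexts */
			}
			free_ctx--;
		}

		printf("flushes, steal one at a time: %lu\n", one_at_a_time);
		printf("flushes, steal all at once:   %lu\n", all_at_once);
		return 0;	/* prints 1000 vs. 63, i.e. roughly 1000/16 */
	}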
From ce67f5d0a00cce231e62334c3624737623c32d6a Mon Sep 17 00:00:00 2001
From: LEROY Christophe
Date: Tue, 20 Jan 2015 10:57:34 +0100
Subject: powerpc32: Use kmem_cache memory for PGDIR

When pages are not 4K, the PGDIR table is allocated with kmalloc(). In
order to optimise the TLB handlers, aligned memory is needed. kmalloc()
doesn't provide aligned memory blocks, so let's use a kmem_cache pool
instead.

Signed-off-by: Christophe Leroy
Signed-off-by: Scott Wood
---
 arch/powerpc/include/asm/pgtable-ppc32.h |  4 ++++
 arch/powerpc/mm/pgtable_32.c             | 16 ++++++++++++++--
 2 files changed, 18 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 9cde3c1522e3..26ce0ab0a9e4 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -347,10 +347,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 #define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
 #define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
 
+#ifndef CONFIG_PPC_4K_PAGES
+void pgtable_cache_init(void);
+#else
 /*
  * No page table caches to initialise
  */
 #define pgtable_cache_init()	do { } while (0)
+#endif
 
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 833139620431..03b1a3b0fbd5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -72,13 +72,25 @@ extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
 
 #define PGDIR_ORDER	(32 + PGD_T_LOG2 - PGDIR_SHIFT)
 
+#ifndef CONFIG_PPC_4K_PAGES
+static struct kmem_cache *pgtable_cache;
+
+void pgtable_cache_init(void)
+{
+	pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER,
+					  1 << PGDIR_ORDER, 0, NULL);
+	if (pgtable_cache == NULL)
+		panic("Couldn't allocate pgtable caches");
+}
+#endif
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
	pgd_t *ret;
 
	/* pgdir take page or two with 4K pages and a page fraction otherwise */
 #ifndef CONFIG_PPC_4K_PAGES
-	ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+	ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
 #else
	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
			PGDIR_ORDER - PAGE_SHIFT);
@@ -89,7 +101,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 #ifndef CONFIG_PPC_4K_PAGES
-	kfree((void *)pgd);
+	kmem_cache_free(pgtable_cache, (void *)pgd);
 #else
	free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
 #endif
-- 
cgit v1.2.3
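The point of switching to a kmem_cache here is the third argument of
kmem_cache_create() (the alignment): it guarantees size-aligned PGDIR
blocks, which kmalloc()/kzalloc() do not promise for arbitrary sizes, and a
size-aligned PGD lets a TLB-miss handler locate the table base with a simple
mask. A userspace analogue of the same requirement, using C11
aligned_alloc(); the 4 KB size is a stand-in for 1 << PGDIR_ORDER.

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t pgdir_size = 4096;	/* stand-in for 1 << PGDIR_ORDER */

		/* malloc() only guarantees alignment suitable for fundamental
		 * types; aligned_alloc() lets us demand a size-aligned block,
		 * so the base address can be recovered by masking low bits. */
		void *pgd = aligned_alloc(pgdir_size, pgdir_size);
		if (!pgd)
			return 1;

		printf("pgd %% %zu == %zu\n", pgdir_size,
		       (size_t)((uintptr_t)pgd % pgdir_size));	/* always 0 */
		free(pgd);
		return 0;
	}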
From 0dc294f717d41bfbafc746a7a96a7bc0f114c20c Mon Sep 17 00:00:00 2001
From: Arseny Solokha
Date: Fri, 30 Jan 2015 19:08:27 +0700
Subject: powerpc/mm: bail out early when flushing TLB page

MMU_NO_CONTEXT is conditionally defined as 0 or (unsigned int)-1.
However, in __flush_tlb_page() the corresponding variable is only
tested against an open-coded 0, which can cause a NULL pointer
dereference if the `mm' argument was legitimately passed as NULL.

Bail out early when the first argument is NULL, thus eliminating the
confusion between the different values of MMU_NO_CONTEXT and avoiding
disabling and then re-enabling preemption unnecessarily.

Signed-off-by: Arseny Solokha
Signed-off-by: Scott Wood
---
 arch/powerpc/mm/tlb_nohash.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index f38ea4df6a85..ab0616b0e6c2 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -284,8 +284,11 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
	struct cpumask *cpu_mask;
	unsigned int pid;
 
+	if (unlikely(!mm))
+		return;
+
	preempt_disable();
-	pid = mm ? mm->context.id : 0;
+	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
-- 
cgit v1.2.3

From c2c896bee08e1461fc24f9bf7dd57e2c63f6db70 Mon Sep 17 00:00:00 2001
From: Arseny Solokha
Date: Wed, 4 Feb 2015 13:18:02 +1100
Subject: powerpc/mm: Warn on flushing tlb page in kernel context

Function __flush_tlb_page() must only be called for user contexts, so
put in extra hardening to warn on calling it for a kernel context.

Signed-off-by: Arseny Solokha
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/tlb_nohash.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index ab0616b0e6c2..cbd3d069897f 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -284,7 +284,11 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
	struct cpumask *cpu_mask;
	unsigned int pid;
 
-	if (unlikely(!mm))
+	/*
+	 * This function as well as __local_flush_tlb_page() must only be called
+	 * for user contexts.
+	 */
+	if (unlikely(WARN_ON(!mm)))
		return;
 
	preempt_disable();
-- 
cgit v1.2.3
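As a closing illustration of the bug these last two patches address: when
MMU_NO_CONTEXT is (unsigned int)-1, the old `mm ? mm->context.id : 0`
mapping let a NULL mm produce pid == 0, slip past the MMU_NO_CONTEXT check,
and reach a dereference of mm. The condensed model below mirrors that
control flow; the types and names are simplified stand-ins for the
kernel's, and the "dereference" is only printed, not performed.

	#include <stdio.h>

	#define MMU_NO_CONTEXT ((unsigned int)-1)	/* one of the two kernel variants */

	struct mm { unsigned int context_id; };

	/* Old logic: a NULL mm maps to pid 0, which is NOT MMU_NO_CONTEXT
	 * here, so the function would go on to dereference mm. */
	static void flush_old(struct mm *mm)
	{
		unsigned int pid = mm ? mm->context_id : 0;

		if (pid == MMU_NO_CONTEXT)
			return;			/* not taken for NULL mm! */
		printf("would dereference mm=%p\n", (void *)mm);
	}

	/* New logic: bail out (and, after the second patch, warn) up front. */
	static void flush_new(struct mm *mm)
	{
		if (!mm) {
			fprintf(stderr, "WARN: flush for kernel context\n");
			return;
		}
		printf("flushing pid %u\n", mm->context_id);
	}

	int main(void)
	{
		flush_old(NULL);	/* demonstrates the latent NULL-deref path */
		flush_new(NULL);	/* safely warns and returns */
		return 0;
	}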