author    Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 18:15:38 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 18:15:38 -0800
commit    d3f180ea1a44aecba1b0dab2a253428e77f906bf (patch)
tree      0be6eaf1eb3fd32c934bd070a3d758696f417c93 /arch/powerpc/mm
parent    6b00f7efb5303418c231994c91fb8239f5ada260 (diff)
parent    a6130ed253a931d2169c26ab0958d81b0dce4d6e (diff)
Merge tag 'powerpc-3.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc updates from Michael Ellerman:

 - Update of all defconfigs

 - Addition of a bunch of config options to modernise our defconfigs

 - Some PS3 updates from Geoff

 - Optimised memcmp for 64 bit from Anton

 - Fix for kprobes that allows 'perf probe' to work from Naveen

 - Several cxl updates from Ian & Ryan

 - Expanded support for the '24x7' PMU from Cody & Sukadev

 - Freescale updates from Scott:
     "Highlights include 8xx optimizations, some more work on datapath
      device tree content, e300 machine check support, t1040 corenet
      error reporting, and various cleanups and fixes"

* tag 'powerpc-3.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: (102 commits)
  cxl: Add missing return statement after handling AFU errror
  cxl: Fail AFU initialisation if an invalid configuration record is found
  cxl: Export optional AFU configuration record in sysfs
  powerpc/mm: Warn on flushing tlb page in kernel context
  powerpc/powernv: Add OPAL soft-poweroff routine
  powerpc/perf/hv-24x7: Document sysfs event description entries
  powerpc/perf/hv-gpci: add the remaining gpci requests
  powerpc/perf/{hv-gpci, hv-common}: generate requests with counters annotated
  powerpc/perf/hv-24x7: parse catalog and populate sysfs with events
  perf: define EVENT_DEFINE_RANGE_FORMAT_LITE helper
  perf: add PMU_EVENT_ATTR_STRING() helper
  perf: provide sysfs_show for struct perf_pmu_events_attr
  powerpc/kernel: Avoid initializing device-tree pointer twice
  powerpc: Remove old compile time disabled syscall tracing code
  powerpc/kernel: Make syscall_exit a local label
  cxl: Fix device_node reference counting
  powerpc/mm: bail out early when flushing TLB page
  powerpc: defconfigs: add MTD_SPI_NOR (new dependency for M25P80)
  perf/powerpc: reset event hw state when adding it to the PMU
  powerpc/qe: Use strlcpy()
  ...
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c        2
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  43
-rw-r--r--  arch/powerpc/mm/pgtable_32.c          19
-rw-r--r--  arch/powerpc/mm/slice.c               29
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c           9
5 files changed, 65 insertions, 37 deletions
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 94cd728166d3..b46912fee7cd 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -67,8 +67,6 @@ struct tlbcamrange {
phys_addr_t phys;
} tlbcam_addrs[NUM_TLBCAMS];
-extern unsigned int tlbcam_index;
-
unsigned long tlbcam_sz(int idx)
{
return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
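
A note on the helper shown in context above: tlbcam_sz() treats tlbcam_addrs[idx].limit as the last mapped byte, hence the trailing + 1. A standalone sketch of that inclusive-bounds arithmetic (the addresses below are hypothetical, not taken from the kernel):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0xc0000000UL;	/* first byte covered by the CAM entry */
	unsigned long limit = 0xc0ffffffUL;	/* last byte covered (inclusive) */
	unsigned long sz = limit - start + 1;	/* 16 MiB */

	printf("CAM entry size: %lu MiB\n", sz >> 20);
	return 0;
}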
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 9cba6cba2e50..986afbc22c76 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -52,12 +52,15 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#include "mmu_decl.h"
+
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
+static bool no_selective_tlbil;
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -133,6 +136,38 @@ static unsigned int steal_context_smp(unsigned int id)
}
#endif /* CONFIG_SMP */
+static unsigned int steal_all_contexts(void)
+{
+ struct mm_struct *mm;
+ int cpu = smp_processor_id();
+ unsigned int id;
+
+ for (id = first_context; id <= last_context; id++) {
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ pr_hardcont(" | steal %d from 0x%p", id, mm);
+
+ /* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+ if (id != first_context) {
+ context_mm[id] = NULL;
+ __clear_bit(id, context_map);
+#ifdef DEBUG_MAP_CONSISTENCY
+ mm->context.active = 0;
+#endif
+ }
+ __clear_bit(id, stale_map[cpu]);
+ }
+
+ /* Flush the TLB for all contexts (not to be used on SMP) */
+ _tlbil_all();
+
+ nr_free_contexts = last_context - first_context;
+
+ return first_context;
+}
+
/* Note that this will also be called on SMP if all other CPUs are
* offlined, which means that it may be called for cpu != 0. For
* this to work, we somewhat assume that CPUs that are onlined
@@ -241,7 +276,10 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
goto stolen;
}
#endif /* CONFIG_SMP */
- id = steal_context_up(id);
+ if (no_selective_tlbil)
+ id = steal_all_contexts();
+ else
+ id = steal_context_up(id);
goto stolen;
}
nr_free_contexts--;
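
Taken together, the two hunks above change the overflow policy: on MMUs that cannot invalidate the TLB per-context (tracked by the new no_selective_tlbil flag), running out of contexts now triggers steal_all_contexts(), which reclaims every id for the price of a single _tlbil_all(), instead of evicting one victim at a time. A user-space toy model of that recycling policy (heavily simplified; the names mirror the kernel's, but none of this is kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define FIRST_CONTEXT 0
#define LAST_CONTEXT  15		/* 8xx-style: only 16 hardware contexts */

static bool context_used[LAST_CONTEXT + 1];
static unsigned int nr_free = LAST_CONTEXT - FIRST_CONTEXT + 1;
static bool no_selective_tlbil = true;	/* true on 8xx */

static unsigned int steal_all_contexts(void)
{
	/* reclaim every id at once; the kernel does one _tlbil_all() here */
	memset(context_used, 0, sizeof(context_used));
	context_used[FIRST_CONTEXT] = true;	/* hand the first id straight back out */
	nr_free = LAST_CONTEXT - FIRST_CONTEXT;	/* everything free except that one */
	return FIRST_CONTEXT;
}

static unsigned int alloc_context(void)
{
	unsigned int id;

	if (nr_free == 0 && no_selective_tlbil)
		return steal_all_contexts();
	/* (the single-victim steal_context_up() path is omitted here) */

	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		if (!context_used[id]) {
			context_used[id] = true;
			nr_free--;
			return id;
		}
	}
	return FIRST_CONTEXT;	/* not reached in this model */
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		printf("got context %u\n", alloc_context());
	return 0;
}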
@@ -407,12 +445,15 @@ void __init mmu_context_init(void)
if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
first_context = 0;
last_context = 15;
+ no_selective_tlbil = true;
} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
first_context = 1;
last_context = 65535;
+ no_selective_tlbil = false;
} else {
first_context = 1;
last_context = 255;
+ no_selective_tlbil = false;
}
#ifdef DEBUG_CLAMP_LAST_CONTEXT
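
The three branches above give CTX_MAP_SIZE very different values, since the context bitmap scales with last_context. A quick back-of-the-envelope in C, assuming the 32-bit layout of these platforms (4-byte unsigned long, so BITS_PER_LONG is 32):

#include <stdio.h>

#define SIZEOF_ULONG  4u
#define BITS_PER_LONG 32u
#define CTX_MAP_SIZE(last) (SIZEOF_ULONG * ((last) / BITS_PER_LONG + 1u))

int main(void)
{
	printf("8xx,   last_context 15:    %u bytes\n", CTX_MAP_SIZE(15u));	/* 4 */
	printf("47x,   last_context 65535: %u bytes\n", CTX_MAP_SIZE(65535u));	/* 8192 */
	printf("other, last_context 255:   %u bytes\n", CTX_MAP_SIZE(255u));	/* 32 */
	return 0;
}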
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 50fad3801f30..03b1a3b0fbd5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -63,7 +63,6 @@ void setbat(int index, unsigned long virt, phys_addr_t phys,
#endif /* HAVE_BATS */
#ifdef HAVE_TLBCAM
-extern unsigned int tlbcam_index;
extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
#else /* !HAVE_TLBCAM */
@@ -73,13 +72,25 @@ extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
+#ifndef CONFIG_PPC_4K_PAGES
+static struct kmem_cache *pgtable_cache;
+
+void pgtable_cache_init(void)
+{
+ pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER,
+ 1 << PGDIR_ORDER, 0, NULL);
+ if (pgtable_cache == NULL)
+ panic("Couldn't allocate pgtable caches");
+}
+#endif
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
/* pgdir take page or two with 4K pages and a page fraction otherwise */
#ifndef CONFIG_PPC_4K_PAGES
- ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+ ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
#else
ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
PGDIR_ORDER - PAGE_SHIFT);
@@ -90,7 +101,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifndef CONFIG_PPC_4K_PAGES
- kfree((void *)pgd);
+ kmem_cache_free(pgtable_cache, (void *)pgd);
#else
free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
#endif
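
The pgd_alloc()/pgd_free() hunks above swap kzalloc() for a dedicated slab cache created by the new pgtable_cache_init(): passing the object size as the alignment argument guarantees each pgd is naturally aligned to 1 << PGDIR_ORDER, which kzalloc() does not promise for sub-page allocations. A hedged sketch of the same create/alloc/free pattern (illustrative names; this uses kernel-only APIs, so it only builds in kernel context):

#include <linux/errno.h>
#include <linux/slab.h>

/* Illustrative slab-cache pattern mirroring the pgtable_cache change:
 * alignment == object size yields naturally aligned objects. */
static struct kmem_cache *example_cache;

static int example_cache_init(size_t objsize)
{
	example_cache = kmem_cache_create("example cache", objsize,
					  objsize /* align == size */, 0, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static void *example_alloc(void)
{
	/* __GFP_ZERO preserves the zeroing that kzalloc() provided */
	return kmem_cache_alloc(example_cache, GFP_KERNEL | __GFP_ZERO);
}

static void example_free(void *obj)
{
	kmem_cache_free(example_cache, obj);
}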
@@ -147,7 +158,7 @@ void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
/* writeable implies dirty for kernel addresses */
- if (flags & _PAGE_RW)
+ if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
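
The ioremap_prot() hunk generalises the "writeable" test: some MMUs (8xx) mark pages with a read-only bit _PAGE_RO rather than a read-write bit, and on any given platform the unused one of the two defines is 0. The single combined test covers both conventions, as this small user-space check illustrates (the bit values are placeholders, not the real pte flags):

#include <stdio.h>

/* the replacement test from ioremap_prot(), with the bits as parameters */
static int is_writeable(unsigned long flags,
			unsigned long rw_bit, unsigned long ro_bit)
{
	return (flags & (rw_bit | ro_bit)) != ro_bit;
}

int main(void)
{
	/* RW convention: _PAGE_RW = 0x1, _PAGE_RO = 0 */
	printf("RW conv, RW set:   %d\n", is_writeable(0x1, 0x1, 0));	/* 1 */
	printf("RW conv, RW clear: %d\n", is_writeable(0x0, 0x1, 0));	/* 0 */
	/* RO convention (8xx): _PAGE_RW = 0, _PAGE_RO = 0x2 */
	printf("RO conv, RO clear: %d\n", is_writeable(0x0, 0, 0x2));	/* 1 */
	printf("RO conv, RO set:   %d\n", is_writeable(0x2, 0, 0x2));	/* 0 */
	return 0;
}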
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index ded0ea1afde4..0f432a702870 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -645,35 +645,6 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
spin_unlock_irqrestore(&slice_convert_lock, flags);
}
-void slice_set_psize(struct mm_struct *mm, unsigned long address,
- unsigned int psize)
-{
- unsigned char *hpsizes;
- unsigned long i, flags;
- u64 *lpsizes;
-
- spin_lock_irqsave(&slice_convert_lock, flags);
- if (address < SLICE_LOW_TOP) {
- i = GET_LOW_SLICE_INDEX(address);
- lpsizes = &mm->context.low_slices_psize;
- *lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
- ((unsigned long) psize << (i * 4));
- } else {
- int index, mask_index;
- i = GET_HIGH_SLICE_INDEX(address);
- hpsizes = mm->context.high_slices_psize;
- mask_index = i & 0x1;
- index = i >> 1;
- hpsizes[index] = (hpsizes[index] &
- ~(0xf << (mask_index * 4))) |
- (((unsigned long)psize) << (mask_index * 4));
- }
-
- spin_unlock_irqrestore(&slice_convert_lock, flags);
-
- copro_flush_all_slbs(mm);
-}
-
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize)
{
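
The deletion above drops slice_set_psize(), evidently dead code, but its body is a handy record of the slice encoding: each slice stores a 4-bit page-size index, with low slices packed sixteen to a u64 and high slices two to a byte. A standalone sketch of the low-slice nibble packing it performed:

#include <stdint.h>
#include <stdio.h>

/* 16 low slices, 4 bits of page-size index each, packed into one u64 */
static void set_low_slice_psize(uint64_t *lpsizes, unsigned int i,
				unsigned int psize)
{
	*lpsizes = (*lpsizes & ~(0xfULL << (i * 4))) |
		   ((uint64_t)psize << (i * 4));
}

static unsigned int get_low_slice_psize(uint64_t lpsizes, unsigned int i)
{
	return (lpsizes >> (i * 4)) & 0xf;
}

int main(void)
{
	uint64_t lpsizes = 0;

	set_low_slice_psize(&lpsizes, 3, 0x5);
	printf("slice 3 psize index: %u\n", get_low_slice_psize(lpsizes, 3));
	return 0;
}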
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index f38ea4df6a85..cbd3d069897f 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -284,8 +284,15 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
struct cpumask *cpu_mask;
unsigned int pid;
+ /*
+ * This function as well as __local_flush_tlb_page() must only be called
+ * for user contexts.
+ */
+ if (unlikely(WARN_ON(!mm)))
+ return;
+
preempt_disable();
- pid = mm ? mm->context.id : 0;
+ pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
goto bail;
cpu_mask = mm_cpumask(mm);
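
The new guard relies on WARN_ON() evaluating to its condition, so one expression can test, dump a stack trace on failure, and bail; pid can then be read unconditionally from mm->context.id. A minimal illustration of the idiom (illustrative function, not from the kernel):

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/errno.h>

/* WARN_ON(cond) returns cond, so it can warn and guard in one step */
static int example_requires_arg(void *arg)
{
	if (unlikely(WARN_ON(!arg)))
		return -EINVAL;	/* stack trace already printed; bail out */

	/* ... safe to dereference arg past this point ... */
	return 0;
}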