author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2017-03-22 09:07:00 +0530
committer	Michael Ellerman <mpe@ellerman.id.au>	2017-04-01 21:12:27 +1100
commit	82228e362f9b7f4b876d0fbb1036c235797c6b1d (patch)
tree	ba7432ffa3577cd4cadb5bd41e1fc8ac21e4a7d3 /arch/powerpc/mm
parent	bb1832217a859f6dbe4a45ff2ba7fdcab0bb3958 (diff)
powerpc/pseries: Skip using reserved virtual address range
Now that we use all of the available virtual address range, we need to make sure we don't generate a VSID that overlaps with the reserved VSID range. The reserved VSID range includes the virtual address range used by the adjunct partition and also the VRMA virtual segment. We find the context value that can result in generating such a VSID and reserve it early in boot.

We don't look at the adjunct range, because for now we disable adjunct usage in a Linux LPAR via the CAS interface.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[mpe: Rewrite hash__reserve_context_id(), move the rest into pseries]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
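For context, a minimal sketch of how a platform init path might use the new hash__reserve_context_id() helper to block out the context value that would otherwise map onto the VRMA VSID. The pseries-side change lives outside arch/powerpc/mm and is therefore not shown in this diff; the function name reserve_vrma_context_id() and the derivation of the context id from VRMA_VSID below are assumptions for illustration only.

/*
 * Illustrative sketch only: reserve the context id whose generated VSIDs
 * would collide with the VRMA virtual segment.  The mapping from
 * VRMA_VSID to ctx_id below is an assumption, not the exact pseries code
 * added by this patch.
 */
static int __init reserve_vrma_context_id(void)
{
	unsigned long ctx_id;

	/*
	 * Work out which context value could scramble to a VSID inside the
	 * reserved (VRMA) range, then mark it as allocated so that
	 * hash__alloc_context_id() can never hand it out to a process.
	 */
	ctx_id = VRMA_VSID >> ESID_BITS;	/* assumed mapping */

	hash__reserve_context_id(ctx_id);
	return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);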
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--arch/powerpc/mm/hash_utils_64.c1
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c16
2 files changed, 16 insertions, 1 deletion
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 716255f88bbd..8848fec51ce9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1868,5 +1868,4 @@ static int __init hash64_debugfs(void)
 	return 0;
 }
 machine_device_initcall(pseries, hash64_debugfs);
-
 #endif /* CONFIG_DEBUG_FS */
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index fd0bc6db2dcd..7bc5b63034db 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -57,6 +57,22 @@ again:
 	return index;
 }
 
+void hash__reserve_context_id(int id)
+{
+	int rc, result = 0;
+
+	do {
+		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
+			break;
+
+		spin_lock(&mmu_context_lock);
+		rc = ida_get_new_above(&mmu_context_ida, id, &result);
+		spin_unlock(&mmu_context_lock);
+	} while (rc == -EAGAIN);
+
+	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
+}
+
 int hash__alloc_context_id(void)
 {
 	unsigned long max;