author		Heiko Carstens <hca@linux.ibm.com>	2023-04-14 14:30:44 +0200
committer	Vasily Gorbik <gor@linux.ibm.com>	2023-04-20 11:36:29 +0200
commit		0490d6d7ba0a479fdd805da54ae25220ce5b514d (patch)
tree		ea909a411bcdc09f10a4070b3a0d1a9d15f37907 /arch/s390/mm/pageattr.c
parent		17c51b1ba9c2d4b497349ac1622aafe67be16103 (diff)
s390/mm: enable ARCH_HAS_SET_DIRECT_MAP
Implement the set_direct_map_*() API, which allows invalidating pages
within the direct mapping and restoring their default permissions.

Note that kernel_page_present(), which is also supposed to be part of
this API, is intentionally not implemented. The reason is that
kernel_page_present() is only used (and currently only makes sense)
for suspend/resume, which isn't supported on s390.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
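[Editor's sketch] For orientation, a minimal sketch of how a generic kernel user might drive this API. The hide_page()/unhide_page() helpers are made up for illustration and are not part of this patch; the sketch assumes the generic contract that the *_noflush variants leave TLB flushing to the caller.

/*
 * Illustrative only -- hypothetical caller of the API implemented by
 * this patch. Assumes the usual contract: *_noflush means the caller
 * is responsible for flushing the TLB afterwards.
 */
#include <linux/set_memory.h>
#include <linux/mm.h>
#include <asm/tlbflush.h>

static int hide_page(struct page *page)
{
	unsigned long addr = (unsigned long)page_to_virt(page);
	int ret;

	/* Invalidate the page in the kernel direct mapping. */
	ret = set_direct_map_invalid_noflush(page);
	if (ret)
		return ret;
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	return 0;
}

static void unhide_page(struct page *page)
{
	unsigned long addr = (unsigned long)page_to_virt(page);

	/* Restore the default kernel mapping permissions. */
	set_direct_map_default_noflush(page);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}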
Diffstat (limited to 'arch/s390/mm/pageattr.c')
-rw-r--r--	arch/s390/mm/pageattr.c	35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 77f31791044d..0b196dea2d92 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -4,6 +4,7 @@
  * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
  */
 #include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 #include <asm/facility.h>
@@ -101,6 +102,14 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
 		else if (flags & SET_MEMORY_X)
 			new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+		if (flags & SET_MEMORY_INV) {
+			new = set_pte_bit(new, __pgprot(_PAGE_INVALID));
+		} else if (flags & SET_MEMORY_DEF) {
+			new = __pte(pte_val(new) & PAGE_MASK);
+			new = set_pte_bit(new, PAGE_KERNEL);
+			if (!MACHINE_HAS_NX)
+				new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+		}
 		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
 		ptep++;
 		addr += PAGE_SIZE;
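[Editor's sketch] The same SET_MEMORY_INV/SET_MEMORY_DEF handling recurs at the pmd and pud levels in the next two hunks. As a rough host-side model of the precedence, using stand-in bit values and flag values rather than the real s390 definitions:

/*
 * Host-side model, illustration only: stand-in values, not the real
 * s390 definitions. SET_MEMORY_INV merely sets the invalid bit;
 * SET_MEMORY_DEF strips all attribute bits and rebuilds the default
 * kernel protection, dropping NOEXEC if the machine lacks the NX
 * facility (the default protection carries NOEXEC, as on s390).
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK	(~0xfffULL)		/* stand-in */
#define _PAGE_INVALID	0x400ULL		/* stand-in */
#define _PAGE_NOEXEC	0x100ULL		/* stand-in */
#define PAGE_KERNEL	(0x22ULL | _PAGE_NOEXEC) /* stand-in default prot */

#define SET_MEMORY_INV	(1UL << 4)		/* stand-in flag values */
#define SET_MEMORY_DEF	(1UL << 5)

static uint64_t apply(uint64_t pte, unsigned long flags, int machine_has_nx)
{
	if (flags & SET_MEMORY_INV) {
		pte |= _PAGE_INVALID;		/* invalidate, keep other bits */
	} else if (flags & SET_MEMORY_DEF) {
		pte &= PAGE_MASK;		/* strip all attribute bits */
		pte |= PAGE_KERNEL;		/* restore kernel defaults */
		if (!machine_has_nx)
			pte &= ~_PAGE_NOEXEC;	/* NOEXEC needs the NX facility */
	}
	return pte;
}

int main(void)
{
	uint64_t pte = 0x12345000ULL | _PAGE_NOEXEC;

	printf("inv: %#llx\n", (unsigned long long)apply(pte, SET_MEMORY_INV, 1));
	printf("def: %#llx\n", (unsigned long long)apply(pte, SET_MEMORY_DEF, 0));
	return 0;
}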
@@ -151,6 +160,14 @@ static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
 		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
 	else if (flags & SET_MEMORY_X)
 		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+	if (flags & SET_MEMORY_INV) {
+		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
+	} else if (flags & SET_MEMORY_DEF) {
+		new = __pmd(pmd_val(new) & PMD_MASK);
+		new = set_pmd_bit(new, SEGMENT_KERNEL);
+		if (!MACHINE_HAS_NX)
+			new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+	}
 	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
 }
@@ -232,6 +249,14 @@ static void modify_pud_page(pud_t *pudp, unsigned long addr,
 		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
 	else if (flags & SET_MEMORY_X)
 		new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+	if (flags & SET_MEMORY_INV) {
+		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_INVALID));
+	} else if (flags & SET_MEMORY_DEF) {
+		new = __pud(pud_val(new) & PUD_MASK);
+		new = set_pud_bit(new, REGION3_KERNEL);
+		if (!MACHINE_HAS_NX)
+			new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+	}
 	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
 }
@@ -325,6 +350,16 @@ int __set_memory(unsigned long addr, int numpages, unsigned long flags)
 	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
 }
 
+int set_direct_map_invalid_noflush(struct page *page)
+{
+	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_INV);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
+}
+
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 
 static void ipte_range(pte_t *pte, unsigned long address, int nr)