author    Tiwei Bie <tiwei.btw@antgroup.com>    2024-10-11 18:23:53 +0800
committer Johannes Berg <johannes.berg@intel.com>    2024-10-23 09:52:49 +0200
commit    2717c6b649e1840328c2758a478bf4034a22ac3e (patch)
tree      e25a72d6d3a34140f62d645ba72c6ae78cbedb65 /arch/um/include/asm
parent    90daca7c8f6f33e9482591446d2e76b18a21be49 (diff)
um: Abandon the _PAGE_NEWPROT bit
When a PTE is updated in the page table, the _PAGE_NEWPAGE bit is always set, and the corresponding page is always mapped or unmapped depending on whether the PTE is present. The check on the _PAGE_NEWPROT bit is therefore never reachable; abandoning the bit lets us simplify the code and drop the dead paths.

Reviewed-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
Link: https://patch.msgid.link/20241011102354.1682626-2-tiwei.btw@antgroup.com
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
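As a minimal sketch of the flush logic the message describes (not the kernel's actual code: do_map() and do_unmap() are hypothetical placeholders for the host-side map/unmap step, while pte_newpage(), pte_present() and pte_mkuptodate() are the helpers from the patched pgtable.h):

void do_map(pte_t *pte);	/* hypothetical: map the page on the host */
void do_unmap(pte_t *pte);	/* hypothetical: unmap the page on the host */

static void sync_pte(pte_t *pte)
{
	if (!pte_newpage(*pte))	/* no update since the last flush */
		return;

	if (pte_present(*pte))
		do_map(pte);	/* (re)map with the current permissions */
	else
		do_unmap(pte);	/* cleared or swapped out: unmap */

	/* Every PTE update sets _PAGE_NEWPAGE, so a separate
	 * "protection only changed" _PAGE_NEWPROT path is never taken. */
	*pte = pte_mkuptodate(*pte);
}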
Diffstat (limited to 'arch/um/include/asm')
-rw-r--r--	arch/um/include/asm/pgtable.h	37
-rw-r--r--	arch/um/include/asm/tlbflush.h	4
2 files changed, 8 insertions(+), 33 deletions(-)
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index bd7a9593705f..6b1317400190 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -12,7 +12,6 @@
#define _PAGE_PRESENT 0x001
#define _PAGE_NEWPAGE 0x002
-#define _PAGE_NEWPROT 0x004
#define _PAGE_RW 0x020
#define _PAGE_USER 0x040
#define _PAGE_ACCESSED 0x080
@@ -151,23 +150,12 @@ static inline int pte_newpage(pte_t pte)
return pte_get_bits(pte, _PAGE_NEWPAGE);
}
-static inline int pte_newprot(pte_t pte)
-{
- return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
-}
-
/*
* =================================
* Flags setting section.
* =================================
*/
-static inline pte_t pte_mknewprot(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_NEWPROT);
- return(pte);
-}
-
static inline pte_t pte_mkclean(pte_t pte)
{
pte_clear_bits(pte, _PAGE_DIRTY);
@@ -182,19 +170,14 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_wrprotect(pte_t pte)
{
- if (likely(pte_get_bits(pte, _PAGE_RW)))
- pte_clear_bits(pte, _PAGE_RW);
- else
- return pte;
- return(pte_mknewprot(pte));
+ pte_clear_bits(pte, _PAGE_RW);
+ return pte;
}
static inline pte_t pte_mkread(pte_t pte)
{
- if (unlikely(pte_get_bits(pte, _PAGE_USER)))
- return pte;
pte_set_bits(pte, _PAGE_USER);
- return(pte_mknewprot(pte));
+ return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
@@ -211,18 +194,14 @@ static inline pte_t pte_mkyoung(pte_t pte)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
- if (unlikely(pte_get_bits(pte, _PAGE_RW)))
- return pte;
pte_set_bits(pte, _PAGE_RW);
- return(pte_mknewprot(pte));
+ return pte;
}
static inline pte_t pte_mkuptodate(pte_t pte)
{
pte_clear_bits(pte, _PAGE_NEWPAGE);
- if(pte_present(pte))
- pte_clear_bits(pte, _PAGE_NEWPROT);
- return(pte);
+ return pte;
}
static inline pte_t pte_mknewpage(pte_t pte)
@@ -236,12 +215,10 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
pte_copy(*pteptr, pteval);
/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
- * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
- * mapped pages.
+ * update_pte_range knows to unmap it.
*/
*pteptr = pte_mknewpage(*pteptr);
- if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
#define PFN_PTE_SHIFT PAGE_SHIFT
@@ -298,8 +275,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
({ pte_t pte; \
\
pte_set_val(pte, page_to_phys(page), (pgprot)); \
- if (pte_present(pte)) \
- pte_mknewprot(pte_mknewpage(pte)); \
pte;})
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
diff --git a/arch/um/include/asm/tlbflush.h b/arch/um/include/asm/tlbflush.h
index db997976b6ea..13a3009942be 100644
--- a/arch/um/include/asm/tlbflush.h
+++ b/arch/um/include/asm/tlbflush.h
@@ -9,8 +9,8 @@
#include <linux/mm.h>
/*
- * In UML, we need to sync the TLB over by using mmap/munmap/mprotect syscalls
- * from the process handling the MM (which can be the kernel itself).
+ * In UML, we need to sync the TLB over by using mmap/munmap syscalls from
+ * the process handling the MM (which can be the kernel itself).
*
* To track updates, we can hook into set_ptes and flush_tlb_*. With set_ptes
* we catch all PTE transitions where memory that was unusable becomes usable.
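For context, a hedged sketch of the host-side sync this comment describes, built on plain mmap()/munmap(); the helper name host_sync_page() and its argument handling are assumptions for illustration, not UML's actual interface:

#include <sys/mman.h>
#include <sys/types.h>

/* Bring one page of the guest address space in sync on the host:
 * map it while the PTE is present, unmap it once it is not. */
static int host_sync_page(unsigned long va, unsigned long len, int prot,
			  int fd, off_t off, int present)
{
	if (present) {
		void *p = mmap((void *)va, len, prot,
			       MAP_SHARED | MAP_FIXED, fd, off);
		return p == MAP_FAILED ? -1 : 0;
	}
	return munmap((void *)va, len);
}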