author     Linus Torvalds <torvalds@linux-foundation.org>  2024-11-30 10:34:54 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-11-30 10:34:54 -0800
commit     831c1926ee728c3e747255f7c0f434762e8e863d (patch)
tree       92d1c73842c3f95b362810aba2080b7582c12c11 /arch/um/include/asm
parent     04b43ea325d21c4c98e831383a1b7d540721898a (diff)
parent     bed2cc482600296fe04edbc38005ba2851449c10 (diff)
Merge tag 'uml-for-linus-6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux
Pull UML updates from Richard Weinberger:

 - Lots of cleanups, mostly from Benjamin Berg and Tiwei Bie

 - Removal of unused code

 - Fix for sparse warnings

 - Cleanup around stub_exe()

* tag 'uml-for-linus-6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux: (68 commits)
  hostfs: Fix the NULL vs IS_ERR() bug for __filemap_get_folio()
  um: move thread info into task
  um: Always dump trace for specified task in show_stack
  um: vector: Do not use drvdata in release
  um: net: Do not use drvdata in release
  um: ubd: Do not use drvdata in release
  um: ubd: Initialize ubd's disk pointer in ubd_add
  um: virtio_uml: query the number of vqs if supported
  um: virtio_uml: fix call_fd IRQ allocation
  um: virtio_uml: send SET_MEM_TABLE message with the exact size
  um: remove broken double fault detection
  um: remove duplicate UM_NSEC_PER_SEC definition
  um: remove file sync for stub data
  um: always include kconfig.h and compiler-version.h
  um: set DONTDUMP and DONTFORK flags on KASAN shadow memory
  um: fix sparse warnings in signal code
  um: fix sparse warnings from regset refactor
  um: Remove double zero check
  um: fix stub exe build with CONFIG_GCOV
  um: Use os_set_pdeathsig helper in winch thread/process
  ...
Diffstat (limited to 'arch/um/include/asm')
-rw-r--r--  arch/um/include/asm/Kbuild                                                          1
-rw-r--r--  arch/um/include/asm/current.h                                                      23
-rw-r--r--  arch/um/include/asm/page.h                                                         34
-rw-r--r--  arch/um/include/asm/pgalloc.h                                                      11
-rw-r--r--  arch/um/include/asm/pgtable-2level.h                                                2
-rw-r--r--  arch/um/include/asm/pgtable-4level.h (renamed from arch/um/include/asm/pgtable-3level.h)  59
-rw-r--r--  arch/um/include/asm/pgtable.h                                                      83
-rw-r--r--  arch/um/include/asm/processor-generic.h                                             7
-rw-r--r--  arch/um/include/asm/thread_info.h                                                  18
-rw-r--r--  arch/um/include/asm/tlbflush.h                                                      4
10 files changed, 117 insertions, 125 deletions
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 18f902da8e99..428f2c5158c2 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += bug.h
generic-y += compat.h
-generic-y += current.h
generic-y += device.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
diff --git a/arch/um/include/asm/current.h b/arch/um/include/asm/current.h
new file mode 100644
index 000000000000..de64e032d66c
--- /dev/null
+++ b/arch/um/include/asm/current.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+#include <linux/threads.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+extern struct task_struct *cpu_tasks[NR_CPUS];
+
+static __always_inline struct task_struct *get_current(void)
+{
+ return cpu_tasks[0];
+}
+
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
index 834313ecd3d6..3d516f3ca9c7 100644
--- a/arch/um/include/asm/page.h
+++ b/arch/um/include/asm/page.h
@@ -29,51 +29,35 @@ struct page;
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
-
typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(p) ((p).pte)
-#define pte_get_bits(p, bits) ((p).pte & (bits))
-#define pte_set_bits(p, bits) ((p).pte |= (bits))
-#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
-#define pte_copy(to, from) ({ (to).pte = (from).pte; })
-#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
-#define pte_set_val(p, phys, prot) \
- ({ (p).pte = (phys) | pgprot_val(prot); })
+#if CONFIG_PGTABLE_LEVELS > 2
+typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )
-typedef unsigned long long phys_t;
+#if CONFIG_PGTABLE_LEVELS > 3
-#else
-
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) } )
-#ifdef CONFIG_3_LEVEL_PGTABLES
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) } )
-#endif
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#define pte_val(x) ((x).pte)
-
#define pte_get_bits(p, bits) ((p).pte & (bits))
#define pte_set_bits(p, bits) ((p).pte |= (bits))
#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
#define pte_copy(to, from) ((to).pte = (from).pte)
-#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
+#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEEDSYNC))
#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
typedef unsigned long phys_t;
-#endif
-
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index de5e31c64793..04fb4e6969a4 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -31,7 +31,7 @@ do { \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
-#ifdef CONFIG_3_LEVEL_PGTABLES
+#if CONFIG_PGTABLE_LEVELS > 2
#define __pmd_free_tlb(tlb, pmd, address) \
do { \
@@ -39,6 +39,15 @@ do { \
tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
} while (0)
+#if CONFIG_PGTABLE_LEVELS > 3
+
+#define __pud_free_tlb(tlb, pud, address) \
+do { \
+ pagetable_pud_dtor(virt_to_ptdesc(pud)); \
+ tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
+} while (0)
+
+#endif
#endif
#endif
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
index 8256ecc5b919..ab0c8dd86564 100644
--- a/arch/um/include/asm/pgtable-2level.h
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -31,7 +31,7 @@
printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
pgd_val(e))
-static inline int pgd_newpage(pgd_t pgd) { return 0; }
+static inline int pgd_needsync(pgd_t pgd) { return 0; }
static inline void pgd_mkuptodate(pgd_t pgd) { }
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-4level.h
index 8a5032ec231f..0d279caee93c 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-4level.h
@@ -4,21 +4,25 @@
* Derived from include/asm-i386/pgtable.h
*/
-#ifndef __UM_PGTABLE_3LEVEL_H
-#define __UM_PGTABLE_3LEVEL_H
+#ifndef __UM_PGTABLE_4LEVEL_H
+#define __UM_PGTABLE_4LEVEL_H
-#include <asm-generic/pgtable-nopud.h>
+#include <asm-generic/pgtable-nop4d.h>
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
-#ifdef CONFIG_64BIT
-#define PGDIR_SHIFT 30
-#else
-#define PGDIR_SHIFT 31
-#endif
+#define PGDIR_SHIFT 39
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
+/* PUD_SHIFT determines the size of the area a third-level page table can
+ * map
+ */
+
+#define PUD_SHIFT 30
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
/* PMD_SHIFT determines the size of the area a second-level page table can
* map
*/
@@ -32,13 +36,9 @@
*/
#define PTRS_PER_PTE 512
-#ifdef CONFIG_64BIT
#define PTRS_PER_PMD 512
+#define PTRS_PER_PUD 512
#define PTRS_PER_PGD 512
-#else
-#define PTRS_PER_PMD 1024
-#define PTRS_PER_PGD 1024
-#endif
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
@@ -48,11 +48,14 @@
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pud_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
pgd_val(e))
-#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
+#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEEDSYNC))
#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
#define pud_populate(mm, pud, pmd) \
@@ -60,23 +63,40 @@
#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
-static inline int pgd_newpage(pgd_t pgd)
+#define p4d_none(x) (!(p4d_val(x) & ~_PAGE_NEEDSYNC))
+#define p4d_bad(x) ((p4d_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define p4d_present(x) (p4d_val(x) & _PAGE_PRESENT)
+#define p4d_populate(mm, p4d, pud) \
+ set_p4d(p4d, __p4d(_PAGE_TABLE + __pa(pud)))
+
+#define set_p4d(p4dptr, p4dval) (*(p4dptr) = (p4dval))
+
+
+static inline int pgd_needsync(pgd_t pgd)
{
- return(pgd_val(pgd) & _PAGE_NEWPAGE);
+ return pgd_val(pgd) & _PAGE_NEEDSYNC;
}
-static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
+static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEEDSYNC; }
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
static inline void pud_clear (pud_t *pud)
{
- set_pud(pud, __pud(_PAGE_NEWPAGE));
+ set_pud(pud, __pud(_PAGE_NEEDSYNC));
+}
+
+static inline void p4d_clear (p4d_t *p4d)
+{
+ set_p4d(p4d, __p4d(_PAGE_NEEDSYNC));
}
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
#define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & PAGE_MASK))
+#define p4d_page(p4d) phys_to_page(p4d_val(p4d) & PAGE_MASK)
+#define p4d_pgtable(p4d) ((pud_t *) __va(p4d_val(p4d) & PAGE_MASK))
+
static inline unsigned long pte_pfn(pte_t pte)
{
return phys_to_pfn(pte_val(pte));
@@ -97,4 +117,3 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
}
#endif
-
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index faab5a2a4b06..0bd60afcc37d 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -11,8 +11,7 @@
#include <asm/fixmap.h>
#define _PAGE_PRESENT 0x001
-#define _PAGE_NEWPAGE 0x002
-#define _PAGE_NEWPROT 0x004
+#define _PAGE_NEEDSYNC 0x002
#define _PAGE_RW 0x020
#define _PAGE_USER 0x040
#define _PAGE_ACCESSED 0x080
@@ -24,10 +23,12 @@
/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE 0x400
-#ifdef CONFIG_3_LEVEL_PGTABLES
-#include <asm/pgtable-3level.h>
-#else
+#if CONFIG_PGTABLE_LEVELS == 4
+#include <asm/pgtable-4level.h>
+#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm/pgtable-2level.h>
+#else
+#error "Unsupported number of page table levels"
#endif
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
@@ -78,22 +79,22 @@ extern unsigned long end_iomem;
*/
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
+#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))
-#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
+#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
+#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEEDSYNC; } while (0)
-#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
-#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
+#define pmd_needsync(x) (pmd_val(x) & _PAGE_NEEDSYNC)
+#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEEDSYNC)
-#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
-#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
+#define pud_needsync(x) (pud_val(x) & _PAGE_NEEDSYNC)
+#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEEDSYNC)
-#define p4d_newpage(x) (p4d_val(x) & _PAGE_NEWPAGE)
-#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)
+#define p4d_needsync(x) (p4d_val(x) & _PAGE_NEEDSYNC)
+#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEEDSYNC)
#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
@@ -144,14 +145,9 @@ static inline int pte_young(pte_t pte)
return pte_get_bits(pte, _PAGE_ACCESSED);
}
-static inline int pte_newpage(pte_t pte)
+static inline int pte_needsync(pte_t pte)
{
- return pte_get_bits(pte, _PAGE_NEWPAGE);
-}
-
-static inline int pte_newprot(pte_t pte)
-{
- return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
+ return pte_get_bits(pte, _PAGE_NEEDSYNC);
}
/*
@@ -160,12 +156,6 @@ static inline int pte_newprot(pte_t pte)
* =================================
*/
-static inline pte_t pte_mknewprot(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_NEWPROT);
- return(pte);
-}
-
static inline pte_t pte_mkclean(pte_t pte)
{
pte_clear_bits(pte, _PAGE_DIRTY);
@@ -180,19 +170,14 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_wrprotect(pte_t pte)
{
- if (likely(pte_get_bits(pte, _PAGE_RW)))
- pte_clear_bits(pte, _PAGE_RW);
- else
- return pte;
- return(pte_mknewprot(pte));
+ pte_clear_bits(pte, _PAGE_RW);
+ return pte;
}
static inline pte_t pte_mkread(pte_t pte)
{
- if (unlikely(pte_get_bits(pte, _PAGE_USER)))
- return pte;
pte_set_bits(pte, _PAGE_USER);
- return(pte_mknewprot(pte));
+ return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
@@ -209,23 +194,19 @@ static inline pte_t pte_mkyoung(pte_t pte)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
- if (unlikely(pte_get_bits(pte, _PAGE_RW)))
- return pte;
pte_set_bits(pte, _PAGE_RW);
- return(pte_mknewprot(pte));
+ return pte;
}
static inline pte_t pte_mkuptodate(pte_t pte)
{
- pte_clear_bits(pte, _PAGE_NEWPAGE);
- if(pte_present(pte))
- pte_clear_bits(pte, _PAGE_NEWPROT);
- return(pte);
+ pte_clear_bits(pte, _PAGE_NEEDSYNC);
+ return pte;
}
-static inline pte_t pte_mknewpage(pte_t pte)
+static inline pte_t pte_mkneedsync(pte_t pte)
{
- pte_set_bits(pte, _PAGE_NEWPAGE);
+ pte_set_bits(pte, _PAGE_NEEDSYNC);
return(pte);
}
@@ -233,13 +214,11 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
pte_copy(*pteptr, pteval);
- /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
- * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
- * mapped pages.
+ /* If it's a swap entry, it needs to be marked _PAGE_NEEDSYNC so
+ * update_pte_range knows to unmap it.
*/
- *pteptr = pte_mknewpage(*pteptr);
- if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
+ *pteptr = pte_mkneedsync(*pteptr);
}
#define PFN_PTE_SHIFT PAGE_SHIFT
@@ -279,7 +258,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
- return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
+ return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEEDSYNC);
}
/*
@@ -294,8 +273,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
({ pte_t pte; \
\
pte_set_val(pte, page_to_phys(page), (pgprot)); \
- if (pte_present(pte)) \
- pte_mknewprot(pte_mknewpage(pte)); \
pte;})
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -329,7 +306,7 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
* <--------------- offset ----------------> E < type -> 0 0 0 1 0
*
* E is the exclusive marker that is not stored in swap entries.
- * _PAGE_NEWPAGE (bit 1) is always set to 1 in set_pte().
+ * _PAGE_NEEDSYNC (bit 1) is always set to 1 in set_pte().
*/
#define __swp_type(x) (((x).val >> 5) & 0x1f)
#define __swp_offset(x) ((x).val >> 11)
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index bce4595798da..5d6356eafffe 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -20,10 +20,7 @@ struct task_struct;
struct mm_struct;
struct thread_struct {
- struct pt_regs regs;
struct pt_regs *segv_regs;
- void *fault_addr;
- jmp_buf *fault_catcher;
struct task_struct *prev_sched;
struct arch_thread arch;
jmp_buf switch_buf;
@@ -33,12 +30,14 @@ struct thread_struct {
void *arg;
} thread;
} request;
+
+ /* Contains variable sized FP registers */
+ struct pt_regs regs;
};
#define INIT_THREAD \
{ \
.regs = EMPTY_REGS, \
- .fault_addr = NULL, \
.prev_sched = NULL, \
.arch = INIT_ARCH_THREAD, \
.request = { } \
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index c7b4b49826a2..f9ad06fcc991 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -17,35 +17,17 @@
#include <sysdep/ptrace_user.h>
struct thread_info {
- struct task_struct *task; /* main task structure */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
- struct thread_info *real_thread; /* Points to non-IRQ stack */
- unsigned long aux_fp_regs[FP_SIZE]; /* auxiliary fp_regs to save/restore
- them out-of-band */
};
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .real_thread = NULL, \
-}
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
- struct thread_info *ti;
- unsigned long mask = THREAD_SIZE - 1;
- void *p;
-
- asm volatile ("" : "=r" (p) : "0" (&ti));
- ti = (struct thread_info *) (((unsigned long)p) & ~mask);
- return ti;
}
#endif
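
The thread_info.h hunk above drops the task back-pointer and the old stack-masking current_thread_info(); together with the new asm/current.h earlier in this diff, "current" is now read from the per-CPU task array and thread_info lives inside task_struct ("um: move thread info into task"). A minimal userspace sketch of that layout, assuming the usual THREAD_INFO_IN_TASK arrangement — the demo main() and the name field are illustrative only:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by this diff. */
struct thread_info {
	unsigned long flags;
	unsigned int cpu;
	int preempt_count;
};

struct task_struct {
	/* With thread_info embedded as the first member, a task pointer
	 * doubles as a thread_info pointer. */
	struct thread_info thread_info;
	const char *name;		/* illustrative field, not from the diff */
};

#define NR_CPUS 1
static struct task_struct *cpu_tasks[NR_CPUS];

/* Mirrors the new arch/um/include/asm/current.h: UML is UP, so the
 * current task is simply whatever runs on CPU 0. */
static inline struct task_struct *get_current(void)
{
	return cpu_tasks[0];
}
#define current get_current()

/* Replaces the removed stack-mask trick: no THREAD_SIZE arithmetic,
 * just a cast of the task pointer. */
static inline struct thread_info *current_thread_info(void)
{
	return (struct thread_info *)current;
}

int main(void)
{
	struct task_struct init_task = {
		.thread_info = { .flags = 0, .cpu = 0, .preempt_count = 1 },
		.name = "swapper",
	};

	cpu_tasks[0] = &init_task;
	printf("current=%s preempt_count=%d\n",
	       current->name, current_thread_info()->preempt_count);
	return 0;
}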
diff --git a/arch/um/include/asm/tlbflush.h b/arch/um/include/asm/tlbflush.h
index db997976b6ea..13a3009942be 100644
--- a/arch/um/include/asm/tlbflush.h
+++ b/arch/um/include/asm/tlbflush.h
@@ -9,8 +9,8 @@
#include <linux/mm.h>
/*
- * In UML, we need to sync the TLB over by using mmap/munmap/mprotect syscalls
- * from the process handling the MM (which can be the kernel itself).
+ * In UML, we need to sync the TLB over by using mmap/munmap syscalls from
+ * the process handling the MM (which can be the kernel itself).
*
* To track updates, we can hook into set_ptes and flush_tlb_*. With set_ptes
* we catch all PTE transitions where memory that was unusable becomes usable.
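
The comment above describes the bookkeeping behind the _PAGE_NEWPAGE/_PAGE_NEWPROT to _PAGE_NEEDSYNC rename that runs through this series: set_pte() marks every entry as needing a sync, and the flush paths later walk the page tables, replay the state to the host with mmap/munmap, and clear the flag. The sketch below only illustrates that idea — the helpers host_map()/host_unmap() and the flat PTE array are hypothetical, not the actual arch/um/kernel/tlb.c code:

#include <stdio.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEEDSYNC	0x002	/* set by set_pte(), cleared once synced */
#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

typedef struct { unsigned long pte; } pte_t;

static int pte_needsync(pte_t pte) { return pte.pte & _PAGE_NEEDSYNC; }
static int pte_present(pte_t pte)  { return pte.pte & _PAGE_PRESENT; }
static pte_t pte_mkuptodate(pte_t pte) { pte.pte &= ~_PAGE_NEEDSYNC; return pte; }

/* Hypothetical host-side operations standing in for the mmap/munmap
 * calls issued by the process handling the MM. */
static void host_map(unsigned long vaddr, unsigned long phys)
{
	printf("mmap   vaddr=%#lx -> phys=%#lx\n", vaddr, phys);
}

static void host_unmap(unsigned long vaddr)
{
	printf("munmap vaddr=%#lx\n", vaddr);
}

/* Simplified update_pte_range()-style walk over a flat PTE array:
 * every entry flagged _PAGE_NEEDSYNC is replayed to the host and
 * then marked up to date. */
static void sync_pte_range(pte_t *ptes, unsigned long start, int count)
{
	for (int i = 0; i < count; i++) {
		unsigned long vaddr = start + ((unsigned long)i << PAGE_SHIFT);

		if (!pte_needsync(ptes[i]))
			continue;
		if (pte_present(ptes[i]))
			host_map(vaddr, ptes[i].pte & PAGE_MASK);
		else
			host_unmap(vaddr);	/* covers cleared and swap PTEs */
		ptes[i] = pte_mkuptodate(ptes[i]);
	}
}

int main(void)
{
	pte_t ptes[3] = {
		{ 0x1000 | _PAGE_PRESENT | _PAGE_NEEDSYNC },	/* newly mapped */
		{ _PAGE_NEEDSYNC },				/* torn down */
		{ 0x3000 | _PAGE_PRESENT },			/* already in sync */
	};

	sync_pte_range(ptes, 0x400000, 3);
	return 0;
}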