author	Heiko Carstens <hca@linux.ibm.com>	2023-10-12 09:40:52 +0200
committer	Vasily Gorbik <gor@linux.ibm.com>	2023-10-23 18:21:23 +0200
commit	b20c8216c1e0d5a5a5c9f40df0cf9bbc795e84f1 (patch)
tree	886a3e01c8bba4242715cc1a475e778bb29208a2 /arch/s390/mm
parent	7c194d84a9ce662426b2ecb59da54bb80c6b1d91 (diff)
s390/mm,fault: move VM_FAULT_ERROR handling to do_exception()
Get rid of do_fault_error() and move its contents to do_exception(), so that do_exception() handles all VM_FAULT_ERROR cases itself. With do_fault_error() removed it is also possible to get rid of the handle_fault_error_nolock() wrapper; instead rename do_no_context() to handle_fault_error_nolock(). As a result the whole fault handling looks much more like that on other architectures.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/fault.c	81
1 file changed, 34 insertions(+), 47 deletions(-)
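For orientation, below is a minimal, self-contained C sketch (not kernel code) of the error dispatch that this patch moves from do_fault_error() into the tail of do_exception(). The VM_FAULT_* values, the user_mode flag, and the helper stubs are illustrative placeholders, not the kernel's definitions; only the shape of the if/else chain mirrors the diff that follows.

	/* Hypothetical stand-alone sketch of the new "error:" dispatch. */
	#include <stdio.h>
	#include <stdbool.h>

	/* Illustrative flag values, not the kernel's vm_fault_reason bits. */
	#define VM_FAULT_OOM     0x1u
	#define VM_FAULT_SIGBUS  0x2u
	#define VM_FAULT_SIGSEGV 0x4u

	/* Stubs standing in for the kernel helpers named in the patch. */
	static void handle_fault_error_nolock(void) { puts("kernel mode: fixup or die"); }
	static void pagefault_out_of_memory(void)   { puts("user mode: OOM handling"); }
	static void do_sigsegv(void)                { puts("user mode: SIGSEGV"); }
	static void do_sigbus(void)                 { puts("user mode: SIGBUS"); }

	/* Same if/else chain as the error label added to do_exception(). */
	static void dispatch_fault_error(unsigned int fault, bool user_mode)
	{
		if (fault & VM_FAULT_OOM) {
			if (!user_mode)
				handle_fault_error_nolock();
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			if (!user_mode)
				handle_fault_error_nolock();
			else
				do_sigsegv();
		} else if (fault & VM_FAULT_SIGBUS) {
			if (!user_mode)
				handle_fault_error_nolock();
			else
				do_sigbus();
		}
	}

	int main(void)
	{
		dispatch_fault_error(VM_FAULT_SIGSEGV, true);  /* user fault: SIGSEGV */
		dispatch_fault_error(VM_FAULT_OOM, false);     /* kernel fault: fixup or die */
		return 0;
	}

The point of the restructuring is that all error handling sits behind a single error label reached only after the mmap lock has been dropped, which is the layout most other architectures' fault handlers use.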
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index bee22e91e14b..249aefcf7c4e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -225,7 +225,7 @@ static void do_sigsegv(struct pt_regs *regs, int si_code)
force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
}
-static void do_no_context(struct pt_regs *regs, int si_code)
+static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
{
enum fault_type fault_type;
unsigned long address;
@@ -253,11 +253,6 @@ static void do_no_context(struct pt_regs *regs, int si_code)
die(regs, "Oops");
}
-static inline void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
-{
- do_no_context(regs, si_code);
-}
-
static void handle_fault_error(struct pt_regs *regs, int si_code)
{
struct mm_struct *mm = current->mm;
@@ -271,31 +266,6 @@ static void do_sigbus(struct pt_regs *regs)
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs));
}
-static void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
-{
- /* fault & VM_FAULT_ERROR */
- if (fault & VM_FAULT_OOM) {
- if (!user_mode(regs))
- do_no_context(regs, 0);
- else
- pagefault_out_of_memory();
- } else if (fault & VM_FAULT_SIGSEGV) {
- /* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs))
- do_no_context(regs, 0);
- else
- do_sigsegv(regs, SEGV_MAPERR);
- } else if (fault & VM_FAULT_SIGBUS) {
- /* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs))
- do_no_context(regs, 0);
- else
- do_sigbus(regs);
- } else {
- BUG();
- }
-}
-
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -362,9 +332,9 @@ static void do_exception(struct pt_regs *regs, int access)
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
- if (likely(!(fault & VM_FAULT_ERROR)))
- fault = 0;
- goto out;
+ if (unlikely(fault & VM_FAULT_ERROR))
+ goto error;
+ return;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
/* Quick path to respond to signals */
@@ -412,13 +382,14 @@ retry:
if (fault & VM_FAULT_COMPLETED) {
if (gmap) {
mmap_read_lock(mm);
- goto out_gmap;
+ goto gmap;
}
- fault = 0;
- goto out;
+ return;
+ }
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ mmap_read_unlock(mm);
+ goto error;
}
- if (unlikely(fault & VM_FAULT_ERROR))
- goto out_up;
if (fault & VM_FAULT_RETRY) {
if (IS_ENABLED(CONFIG_PGSTE) && gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
/*
@@ -433,7 +404,7 @@ retry:
mmap_read_lock(mm);
goto retry;
}
-out_gmap:
+gmap:
if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
address);
@@ -441,15 +412,31 @@ out_gmap:
return handle_fault_error(regs, SEGV_MAPERR);
if (address == -ENOMEM) {
fault = VM_FAULT_OOM;
- goto out_up;
+ mmap_read_unlock(mm);
+ goto error;
}
}
- fault = 0;
-out_up:
mmap_read_unlock(mm);
-out:
- if (unlikely(fault))
- do_fault_error(regs, fault);
+ return;
+error:
+ if (fault & VM_FAULT_OOM) {
+ if (!user_mode(regs))
+ handle_fault_error_nolock(regs, 0);
+ else
+ pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ if (!user_mode(regs))
+ handle_fault_error_nolock(regs, 0);
+ else
+ do_sigsegv(regs, SEGV_MAPERR);
+ } else if (fault & VM_FAULT_SIGBUS) {
+ if (!user_mode(regs))
+ handle_fault_error_nolock(regs, 0);
+ else
+ do_sigbus(regs);
+ } else {
+ BUG();
+ }
}
void do_protection_exception(struct pt_regs *regs)
@@ -477,7 +464,7 @@ void do_protection_exception(struct pt_regs *regs)
* Low-address protection in kernel mode means
* NULL pointer write access in kernel mode.
*/
- return do_no_context(regs, 0);
+ return handle_fault_error_nolock(regs, 0);
}
if (unlikely(MACHINE_HAS_NX && teid.b56)) {
regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);