author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-16 15:07:51 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-16 15:07:51 -0700
commit | 3eb514866f20c5eb74637279774b6d73b855480a (patch) |
tree | 72506a3ee7caf658db86a63d1f21a483f8a5d6d6 /arch/arc/mm |
parent | c309b6f24222246c18a8b65d3950e6e755440865 (diff) |
parent | 24a20b0a443fd485852d51d08e98bbd9d212e0ec (diff) |
Merge tag 'arc-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:
- long due rewrite of do_page_fault (see the C sketch after the commit list)
- refactoring of entry/exit code to utilize the double load/store
instructions
- hsdk platform updates
* tag 'arc-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
ARC: [plat-hsdk]: Enable AXI DW DMAC in defconfig
ARC: [plat-hsdk]: enable DW SPI controller
ARC: hide unused function unw_hdr_alloc
ARC: [haps] Add Virtio support
ARCv2: entry: simplify return to Delay Slot via interrupt
ARC: entry: EV_Trap expects r10 (vs. r9) to have exception cause
ARCv2: entry: rewrite to enable use of double load/stores LDD/STD
ARCv2: entry: avoid a branch
ARCv2: entry: push out the Z flag unclobber from common EXCEPTION_PROLOGUE
ARCv2: entry: comments about hardware auto-save on taken interrupts
ARC: mm: do_page_fault refactor #8: release mmap_sem sooner
ARC: mm: do_page_fault refactor #7: fold the various error handling
ARC: mm: do_page_fault refactor #6: error handlers to use same pattern
ARC: mm: do_page_fault refactor #5: scoot no_context to end
ARC: mm: do_page_fault refactor #4: consolidate retry related logic
ARC: mm: do_page_fault refactor #3: tidyup vma access permission code
ARC: mm: do_page_fault refactor #2: remove short lived variable
ARC: mm: do_page_fault refactor #1: remove label @good_area
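The centerpiece of the do_page_fault rewrite (refactors #1-#8 above; full diff below) is collapsing the old branchy permission checks into a single mask derived from the fault type, tested against vma->vm_flags in one place. A minimal user-space sketch of that pattern follows; it is not kernel code, and the VM_* values and the access_ok_for_fault() helper are illustrative stand-ins:

/*
 * Stand-alone sketch (not kernel code) of the access-permission pattern the
 * rewrite converges on: derive one mask from the fault type, then test the
 * vma permissions against it in a single place.
 */
#include <stdio.h>

#define VM_READ		0x1
#define VM_WRITE	0x2
#define VM_EXEC		0x4

struct fault_info {
	unsigned int vm_flags;	/* permissions of the faulting vma */
	unsigned int write;	/* store/exchange caused the fault */
	unsigned int exec;	/* instruction fetch caused the fault */
};

static int access_ok_for_fault(const struct fault_info *f)
{
	unsigned int mask = VM_READ;	/* default: a load faulted */

	if (f->write)
		mask = VM_WRITE;
	if (f->exec)
		mask = VM_EXEC;

	return (f->vm_flags & mask) != 0;	/* 0 maps to SEGV_ACCERR */
}

int main(void)
{
	struct fault_info st_to_rodata = { .vm_flags = VM_READ, .write = 1 };
	struct fault_info fetch_from_rx = { .vm_flags = VM_READ | VM_EXEC, .exec = 1 };

	printf("store to read-only vma: %s\n",
	       access_ok_for_fault(&st_to_rodata) ? "ok" : "SEGV_ACCERR");
	printf("fetch from r-x vma:     %s\n",
	       access_ok_for_fault(&fetch_from_rx) ? "ok" : "SEGV_ACCERR");
	return 0;
}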
Diffstat (limited to 'arch/arc/mm')
-rw-r--r-- | arch/arc/mm/fault.c | 185
-rw-r--r-- | arch/arc/mm/tlbex.S | 11
2 files changed, 88 insertions, 108 deletions
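One more piece worth isolating before reading the diff: refactor #4 above reduces the retry handling to a small state machine (allow one retry, then mark the attempt as tried). A hedged stand-alone model of just that flow; the flag values and the fake_handle_mm_fault() stub are made up for illustration, the real definitions live in the kernel's mm headers:

/*
 * Stand-alone model (not kernel code) of the single-retry state machine in
 * the rewritten do_page_fault: the first attempt allows a retry; if the
 * core mm asks for one, retry exactly once with FAULT_FLAG_TRIED set.
 */
#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x1
#define FAULT_FLAG_TRIED	0x2
#define VM_FAULT_RETRY		0x4

/* Pretend the first attempt must be retried, the second succeeds. */
static unsigned int fake_handle_mm_fault(unsigned int flags, int *calls)
{
	++*calls;
	return (flags & FAULT_FLAG_TRIED) ? 0 : VM_FAULT_RETRY;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	unsigned int fault;
	int calls = 0;

retry:
	fault = fake_handle_mm_fault(flags, &calls);

	if ((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		/* as in the patch: drop ALLOW_RETRY, mark TRIED, loop once */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	printf("fake_handle_mm_fault() ran %d time(s)\n", calls);	/* prints 2 */
	return 0;
}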
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 81e84426fe21..3861543b66a0 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -63,24 +63,19 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	int si_code = SEGV_MAPERR;
-	int ret;
-	vm_fault_t fault;
-	int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	int sig, si_code = SEGV_MAPERR;
+	unsigned int write = 0, exec = 0, mask;
+	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
+	unsigned int flags;			/* handle_mm_fault() input */
 
 	/*
-	 * We fault-in kernel-space virtual memory on-demand. The
-	 * 'reference' page table is init_mm.pgd.
-	 *
 	 * NOTE! We MUST NOT take any locks for this case. We may
 	 * be in an interrupt or a critical region, and should
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
 	if (address >= VMALLOC_START && !user_mode(regs)) {
-		ret = handle_kernel_vaddr_fault(address);
-		if (unlikely(ret))
+		if (unlikely(handle_kernel_vaddr_fault(address)))
 			goto no_context;
 		else
 			return;
@@ -93,143 +88,117 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
+	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
+		write = 1;
+	else if ((regs->ecr_vec == ECR_V_PROTV) &&
+		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+		exec = 1;
+
+	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+	if (write)
+		flags |= FAULT_FLAG_WRITE;
+
 retry:
 	down_read(&mm->mmap_sem);
+
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+	if (unlikely(address < vma->vm_start)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
+			goto bad_area;
+	}
 
 	/*
-	 * Ok, we have a good vm_area for this memory access, so
-	 * we can handle it..
+	 * vm_area is good, now check permissions for this memory access
 	 */
-good_area:
-	si_code = SEGV_ACCERR;
-
-	/* Handle protection violation, execute on heap or stack */
-
-	if ((regs->ecr_vec == ECR_V_PROTV) &&
-	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+	mask = VM_READ;
+	if (write)
+		mask = VM_WRITE;
+	if (exec)
+		mask = VM_EXEC;
+
+	if (!(vma->vm_flags & mask)) {
+		si_code = SEGV_ACCERR;
 		goto bad_area;
-
-	if (write) {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-		flags |= FAULT_FLAG_WRITE;
-	} else {
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-			goto bad_area;
 	}
 
-	/*
-	 * If for any reason at all we couldn't handle the fault,
-	 * make sure we exit gracefully rather than endlessly redo
-	 * the fault.
-	 */
 	fault = handle_mm_fault(vma, address, flags);
 
-	if (fatal_signal_pending(current)) {
+	/*
+	 * Fault retry nuances
+	 */
+	if (unlikely(fault & VM_FAULT_RETRY)) {
 
 		/*
-		 * if fault retry, mmap_sem already relinquished by core mm
-		 * so OK to return to user mode (with signal handled first)
+		 * If fault needs to be retried, handle any pending signals
+		 * first (by returning to user mode).
+		 * mmap_sem already relinquished by core mm for RETRY case
 		 */
-		if (fault & VM_FAULT_RETRY) {
+		if (fatal_signal_pending(current)) {
 			if (!user_mode(regs))
 				goto no_context;
 			return;
 		}
-	}
-
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
-	if (likely(!(fault & VM_FAULT_ERROR))) {
+		/*
+		 * retry state machine
+		 */
 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
-			/* To avoid updating stats twice for retry case */
-			if (fault & VM_FAULT_MAJOR) {
-				tsk->maj_flt++;
-				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-					      regs, address);
-			} else {
-				tsk->min_flt++;
-				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-					      regs, address);
-			}
-
-			if (fault & VM_FAULT_RETRY) {
-				flags &= ~FAULT_FLAG_ALLOW_RETRY;
-				flags |= FAULT_FLAG_TRIED;
-				goto retry;
-			}
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+			goto retry;
 		}
-
-		/* Fault Handled Gracefully */
-		up_read(&mm->mmap_sem);
-		return;
 	}
 
-	if (fault & VM_FAULT_OOM)
-		goto out_of_memory;
-	else if (fault & VM_FAULT_SIGSEGV)
-		goto bad_area;
-	else if (fault & VM_FAULT_SIGBUS)
-		goto do_sigbus;
-
-	/* no man's land */
-	BUG();
+bad_area:
+	up_read(&mm->mmap_sem);
 
 	/*
-	 * Something tried to access memory that isn't in our memory map..
-	 * Fix it, but check if it's kernel or user first..
+	 * Major/minor page fault accounting
+	 * (in case of retry we only land here once)
 	 */
-bad_area:
-	up_read(&mm->mmap_sem);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	/* User mode accesses just cause a SIGSEGV */
-	if (user_mode(regs)) {
-		tsk->thread.fault_address = address;
-		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
-		return;
-	}
+	if (likely(!(fault & VM_FAULT_ERROR))) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+				      regs, address);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+				      regs, address);
		}
 
-no_context:
-	/* Are we prepared to handle this kernel fault?
-	 *
-	 * (The kernel has valid exception-points in the source
-	 * when it accesses user-memory. When it fails in one
-	 * of those points, we find it in a table and do a jump
-	 * to some fixup code that loads an appropriate error
-	 * code)
-	 */
-	if (fixup_exception(regs))
+		/* Normal return path: fault Handled Gracefully */
 		return;
+	}
 
-	die("Oops", regs, address);
-
-out_of_memory:
-	up_read(&mm->mmap_sem);
+	if (!user_mode(regs))
+		goto no_context;
 
-	if (user_mode(regs)) {
+	if (fault & VM_FAULT_OOM) {
 		pagefault_out_of_memory();
 		return;
 	}
 
-	goto no_context;
+	if (fault & VM_FAULT_SIGBUS) {
+		sig = SIGBUS;
+		si_code = BUS_ADRERR;
+	}
+	else {
+		sig = SIGSEGV;
+	}
 
-do_sigbus:
-	up_read(&mm->mmap_sem);
+	tsk->thread.fault_address = address;
+	force_sig_fault(sig, si_code, (void __user *)address);
+	return;
 
-	if (!user_mode(regs))
-		goto no_context;
+no_context:
+	if (fixup_exception(regs))
+		return;
 
-	tsk->thread.fault_address = address;
-	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+	die("Oops", regs, address);
 }
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 471a97bf492d..c55d95dd2f39 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -393,6 +393,17 @@ EV_TLBMissD_fast_ret:	; additional label for VDK OS-kit instrumentation
 
 ;-------- Common routine to call Linux Page Fault Handler -----------
 do_slow_path_pf:
 
+#ifdef CONFIG_ISA_ARCV2
+	; Set Z flag if exception in U mode. Hardware micro-ops do this on any
+	; taken interrupt/exception, and thus is already the case at the entry
+	; above, but ensuing code would have already clobbered.
+	; EXCEPTION_PROLOGUE called in slow path, relies on correct Z flag set
+
+	lr	r2, [erstatus]
+	and	r2, r2, STATUS_U_MASK
+	bxor.f	0, r2, STATUS_U_BIT
+#endif
+
 	; Restore the 4-scratch regs saved by fast path miss handler
 	TLBMISS_RESTORE_REGS
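For the tlbex.S hunk just above: the three added instructions recompute a condition the hardware establishes on any taken exception but intervening code has clobbered, namely Z set exactly when the fault came from user mode, which the ensuing EXCEPTION_PROLOGUE relies on. A small C model of that computation; the bit position of STATUS_U_BIT is an assumption for illustration (the kernel takes it from its arch headers):

/*
 * C model (not kernel code) of what the added instructions compute:
 * Z must end up set exactly when the exception was taken in user mode.
 */
#include <stdio.h>

#define STATUS_U_BIT	7			/* assumed U-bit position */
#define STATUS_U_MASK	(1u << STATUS_U_BIT)

/* Mirrors: and r2, r2, STATUS_U_MASK ; bxor.f 0, r2, STATUS_U_BIT */
static int z_flag_from_erstatus(unsigned int erstatus)
{
	unsigned int r2 = erstatus & STATUS_U_MASK;	/* isolate the U bit */

	/* bxor flips bit STATUS_U_BIT; Z=1 iff the result is zero */
	return (r2 ^ STATUS_U_MASK) == 0;
}

int main(void)
{
	printf("user-mode fault:   Z=%d\n", z_flag_from_erstatus(STATUS_U_MASK));
	printf("kernel-mode fault: Z=%d\n", z_flag_from_erstatus(0));
	return 0;
}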