author     Jan Kara <jack@suse.cz>                     2016-11-24 09:24:21 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>     2016-11-29 15:36:39 +1100
commit     4685c8a2a71d45240262cfad601f44aa2aff67ae (patch)
tree       fa95869e5f97dda7f12926be2d5553e14ae5529c
parent     94be2b050c5d3d7fffe9033f7bdb5a485eb23d31 (diff)
mm: factor out functionality to finish page faults
Introduce finish_fault() as a helper function for finishing page faults.
It is a rather thin wrapper around alloc_set_pte(), but since we will
want to call this from DAX code or filesystems, it is still useful to
avoid some boilerplate code.
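
[Editorial note: as an illustration of the intended calling convention, here is
a minimal sketch of a fault handler built on the new helper, modeled on the
do_read_fault() conversion in the diff below. my_fs_prepare_page() is a
hypothetical helper standing in for whatever fills vmf->page with a locked,
uptodate page; finish_fault(), the VM_FAULT_* codes, unlock_page() and
put_page() are used as in this patch.]

static int my_fs_fault_sketch(struct vm_fault *vmf)
{
	int ret;

	/* Hypothetical helper: read in vmf->page, locked and uptodate. */
	ret = my_fs_prepare_page(vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

	/*
	 * Lock the PTE and insert the mapping; reverse mapping, memcg
	 * charge and LRU addition are all handled inside finish_fault().
	 */
	ret |= finish_fault(vmf);

	unlock_page(vmf->page);
	/*
	 * On success the new PTE consumed our page reference; on failure
	 * we must drop it ourselves.
	 */
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		put_page(vmf->page);
	return ret;
}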
Link: http://lkml.kernel.org/r/1479460644-25076-10-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  include/linux/mm.h |  1
-rw-r--r--  mm/memory.c        | 44
2 files changed, 36 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f3445efe1241..3191a4a07f58 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,6 +620,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 
 int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		struct page *page);
+int finish_fault(struct vm_fault *vmf);
 #endif
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index f5460cae9103..88b9558a7fdd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3074,6 +3074,38 @@ fault_handled:
 	return ret;
 }
 
+/**
+ * finish_fault - finish page fault once we have prepared the page to fault
+ *
+ * @vmf: structure describing the fault
+ *
+ * This function handles all that is needed to finish a page fault once the
+ * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
+ * given page, adds reverse page mapping, handles memcg charges and LRU
+ * addition. The function returns 0 on success, VM_FAULT_ code in case of
+ * error.
+ *
+ * The function expects the page to be locked and on success it consumes a
+ * reference of a page being mapped (for the PTE which maps it).
+ */
+int finish_fault(struct vm_fault *vmf)
+{
+	struct page *page;
+	int ret;
+
+	/* Did we COW the page? */
+	if ((vmf->flags & FAULT_FLAG_WRITE) &&
+	    !(vmf->vma->vm_flags & VM_SHARED))
+		page = vmf->cow_page;
+	else
+		page = vmf->page;
+	ret = alloc_set_pte(vmf, vmf->memcg, page);
+	if (vmf->pte)
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+	return ret;
+}
+
 static unsigned long fault_around_bytes __read_mostly =
 	rounddown_pow_of_two(65536);
 
@@ -3213,9 +3245,7 @@ static int do_read_fault(struct vm_fault *vmf)
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
-	ret |= alloc_set_pte(vmf, NULL, vmf->page);
-	if (vmf->pte)
-		pte_unmap_unlock(vmf->pte, vmf->ptl);
+	ret |= finish_fault(vmf);
 	unlock_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		put_page(vmf->page);
@@ -3250,9 +3280,7 @@ static int do_cow_fault(struct vm_fault *vmf)
 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
 	__SetPageUptodate(vmf->cow_page);
 
-	ret |= alloc_set_pte(vmf, vmf->memcg, vmf->cow_page);
-	if (vmf->pte)
-		pte_unmap_unlock(vmf->pte, vmf->ptl);
+	ret |= finish_fault(vmf);
 	if (!(ret & VM_FAULT_DAX_LOCKED)) {
 		unlock_page(vmf->page);
 		put_page(vmf->page);
@@ -3293,9 +3321,7 @@ static int do_shared_fault(struct vm_fault *vmf)
 		}
 	}
 
-	ret |= alloc_set_pte(vmf, NULL, vmf->page);
-	if (vmf->pte)
-		pte_unmap_unlock(vmf->pte, vmf->ptl);
+	ret |= finish_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
 					VM_FAULT_RETRY))) {
 		unlock_page(vmf->page);