author	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-14 19:42:11 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-14 19:42:11 -0700
commit	fec88ab0af9706b2201e5daf377c5031c62d11f7
tree	7206e8a3ff2dea87f912f4660d453a8c118248ac /mm/memory.c
parent	fa6e951a2a440babd7a7310d0f4713e618061767
parent	cc5dfd59e375f4d0f2b64643723d16b38b2f2d78
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull HMM updates from Jason Gunthorpe:
"Improvements and bug fixes for the hmm interface in the kernel:
- Improve clarity, locking and APIs related to the 'hmm mirror'
feature merged last cycle. In linux-next we now see AMDGPU and
nouveau using this API.
- Remove old or transitional hmm APIs. These are holdovers from the
past with no users, or APIs that existed only to manage cross-tree
conflicts. There are still a few more of these cleanups that didn't
make the merge window cutoff.
- Improve some core mm APIs:
- export alloc_pages_vma() for driver use
- refactor into devm_request_free_mem_region() to manage
DEVICE_PRIVATE resource reservations
- refactor duplicative driver code into the core dev_pagemap
struct
- Remove hmm wrappers of improved core mm APIs, instead have drivers
use the simplified API directly
- Remove DEVICE_PUBLIC
- Simplify the kconfig flow for the hmm users and core code"
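
To make the reworked flow concrete, here is a minimal sketch of how a DEVICE_PRIVATE driver might sit on top of the APIs named above (devm_request_free_mem_region(), devm_memremap_pages(), struct dev_pagemap_ops). It is illustrative only, not code from this merge: my_dev, my_probe(), vram_size and the my_* callbacks are hypothetical names.

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/memremap.h>
#include <linux/mm.h>

struct my_dev {				/* hypothetical driver state */
	struct dev_pagemap pagemap;
	unsigned long vram_size;
};

/* Placeholder; a sketch of a real implementation follows the diff below. */
static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static void my_page_free(struct page *page)
{
	/* Release the device-side backing store for the freed page. */
}

static const struct dev_pagemap_ops my_pagemap_ops = {
	.page_free	= my_page_free,
	.migrate_to_ram	= my_migrate_to_ram,
};

static int my_probe(struct device *dev, struct my_dev *mdev)
{
	struct resource *res;
	void *addr;

	/* Reserve an unused physical range to back the device pages. */
	res = devm_request_free_mem_region(dev, &iomem_resource,
					   mdev->vram_size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	mdev->pagemap.type = MEMORY_DEVICE_PRIVATE;
	mdev->pagemap.res = *res;
	mdev->pagemap.ops = &my_pagemap_ops;
	/* Leaving pagemap.ref NULL opts in to the new internal refcount. */

	addr = devm_memremap_pages(dev, &mdev->pagemap);
	return PTR_ERR_OR_ZERO(addr);
}

With the internal refcount and devm-managed lifetime, the driver no longer supplies its own percpu_ref or ->kill/->cleanup pair; the duplicative glue each driver used to carry now lives in the core dev_pagemap code.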
* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (42 commits)
mm: don't select MIGRATE_VMA_HELPER from HMM_MIRROR
mm: remove the HMM config option
mm: sort out the DEVICE_PRIVATE Kconfig mess
mm: simplify ZONE_DEVICE page private data
mm: remove hmm_devmem_add
mm: remove hmm_vma_alloc_locked_page
nouveau: use devm_memremap_pages directly
nouveau: use alloc_page_vma directly
PCI/P2PDMA: use the dev_pagemap internal refcount
device-dax: use the dev_pagemap internal refcount
memremap: provide an optional internal refcount in struct dev_pagemap
memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag
memremap: remove the data field in struct dev_pagemap
memremap: add a migrate_to_ram method to struct dev_pagemap_ops
memremap: lift the devmap_enable manipulation into devm_memremap_pages
memremap: pass a struct dev_pagemap to ->kill and ->cleanup
memremap: move dev_pagemap callbacks into a separate structure
memremap: validate the pagemap type passed to devm_memremap_pages
mm: factor out a devm_request_free_mem_region helper
mm: export alloc_pages_vma
...
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	49
1 file changed, 5 insertions(+), 44 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 53bd59579861..89325f9c6173 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -571,8 +571,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  * PFNMAP mappings in order to support COWable mappings.
  *
  */
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-			     pte_t pte, bool with_public_device)
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+			    pte_t pte)
 {
 	unsigned long pfn = pte_pfn(pte);
@@ -585,29 +585,6 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 	if (is_zero_pfn(pfn))
 		return NULL;
-
-	/*
-	 * Device public pages are special pages (they are ZONE_DEVICE
-	 * pages but different from persistent memory). They behave
-	 * allmost like normal pages. The difference is that they are
-	 * not on the lru and thus should never be involve with any-
-	 * thing that involve lru manipulation (mlock, numa balancing,
-	 * ...).
-	 *
-	 * This is why we still want to return NULL for such page from
-	 * vm_normal_page() so that we do not have to special case all
-	 * call site of vm_normal_page().
-	 */
-	if (likely(pfn <= highest_memmap_pfn)) {
-		struct page *page = pfn_to_page(pfn);
-
-		if (is_device_public_page(page)) {
-			if (with_public_device)
-				return page;
-			return NULL;
-		}
-	}
-
 	if (pte_devmap(pte))
 		return NULL;
@@ -797,17 +774,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		rss[mm_counter(page)]++;
 	} else if (pte_devmap(pte)) {
 		page = pte_page(pte);
-
-		/*
-		 * Cache coherent device memory behave like regular page and
-		 * not like persistent memory page. For more informations see
-		 * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
-		 */
-		if (is_device_public_page(page)) {
-			get_page(page);
-			page_dup_rmap(page, false);
-			rss[mm_counter(page)]++;
-		}
 	}
 
 out_set_pte:
@@ -1063,7 +1029,7 @@ again:
 		if (pte_present(ptent)) {
 			struct page *page;
 
-			page = _vm_normal_page(vma, addr, ptent, true);
+			page = vm_normal_page(vma, addr, ptent);
 			if (unlikely(details) && page) {
 				/*
 				 * unmap_shared_mapping_pages() wants to
@@ -2777,13 +2743,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			migration_entry_wait(vma->vm_mm, vmf->pmd,
 					     vmf->address);
 	} else if (is_device_private_entry(entry)) {
-		/*
-		 * For un-addressable device memory we call the pgmap
-		 * fault handler callback. The callback must migrate
-		 * the page back to some CPU accessible page.
-		 */
-		ret = device_private_entry_fault(vma, vmf->address, entry,
-						 vmf->flags, vmf->pmd);
+		vmf->page = device_private_entry_to_page(entry);
+		ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
 	} else if (is_hwpoison_entry(entry)) {
 		ret = VM_FAULT_HWPOISON;
 	} else {
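
The last hunk is the visible end of the migrate_to_ram change: do_swap_page() now resolves the device-private swap entry to its page and dispatches the fault straight to the driver's dev_pagemap_ops, instead of going through the removed device_private_entry_fault() wrapper. As a rough, hypothetical sketch of what such a callback does (fleshing out the my_migrate_to_ram() stub above; my_copy_to_ram() is a made-up helper standing in for the driver's DMA logic):

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	struct page *dpage = vmf->page;	/* un-addressable device page */
	struct page *spage;

	/*
	 * alloc_page_vma() is usable from driver code now that this
	 * series exports alloc_pages_vma().
	 */
	spage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
	if (!spage)
		return VM_FAULT_SIGBUS;

	if (my_copy_to_ram(dpage, spage)) {	/* hypothetical copy-back */
		__free_page(spage);
		return VM_FAULT_SIGBUS;
	}

	/*
	 * A real driver hands spage to the migrate_vma() machinery so
	 * the faulting PTE is rewritten to point at system memory;
	 * that plumbing is elided here. Returning 0 tells
	 * do_swap_page() the fault was handled.
	 */
	return 0;
}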