author | Jérôme Glisse <jglisse@redhat.com> | 2013-08-02 15:17:24 -0400
---|---|---
committer | Jérôme Glisse <jglisse@redhat.com> | 2015-08-10 11:15:00 -0400
commit | 735b0ec25d33cd21b2516c7e76cd05d6b6f7caa4 |
tree | ae7d70759d1463dec5dbbb4697b0ec469a467a6b | mm/ksm.c
parent | f7644cbfcdf03528f0f450f3940c4985b2291f49 |
mmu_notifier: add event information to address invalidation v8
The event information will be useful for new users of the mmu_notifier API.
The event argument differentiates between a vma disappearing, a page being
write protected, or simply a page being unmapped. This allows new users to
take a different path for each event: on unmap, for instance, the resources
used to track a vma are still valid and should stay around, while if the
event says that a vma is being destroyed, any resources used to track that
vma can be freed.
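As an illustration of how a listener might use this, here is a minimal
sketch of an invalidate_range_start() callback that branches on the event.
This is not code from this patch: it assumes the ->invalidate_range_start()
callback gains the same event argument that this series adds to the call
sites, and struct my_mirror and the my_mirror_*() helpers are invented for
the example.

/*
 * Hypothetical listener sketch (not part of this patch).  Assumes the
 * ->invalidate_range_start() callback takes the new event argument,
 * mirroring the call-site change; my_mirror_*() are invented helpers.
 */
static void my_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end,
				      enum mmu_event event)
{
	struct my_mirror *mirror = container_of(mn, struct my_mirror, mn);

	switch (event) {
	case MMU_MUNMAP:
		/* The vma is going away: free the tracking structures. */
		my_mirror_release(mirror, start, end);
		break;
	case MMU_MIGRATE:
		/* Range stays valid, only the backing pages change. */
	default:
		/*
		 * Write protect, soft dirty, etc.: invalidate the mirrored
		 * mappings but keep the per-vma tracking around.
		 */
		my_mirror_invalidate(mirror, start, end);
		break;
	}
}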
Changed since v1:
- renamed action into event (updated commit message too).
- simplified the event names and clarified their usage,
also documenting what expectations the listener can have
with respect to each event.
Changed since v2:
- Avoid crazy name.
- Do not move code that does not need to move.
Changed since v3:
- Separate huge page split from mlock/munlock and softdirty.
Changed since v4:
- Rebase (no other changes).
Changed since v5:
- Typo fix.
- Changed zap_page_range from MMU_MUNMAP to MMU_MIGRATE to reflect the
fact that the address range is still valid; only the pages backing it
are no longer.
Changed since v6:
- try_to_unmap_one() only invalidates when doing migration.
- Differentiate fork from other cases.
Changed since v7:
- Renamed MMU_HUGE_PAGE_SPLIT to MMU_HUGE_PAGE_SPLIT.
- Renamed MMU_ISDIRTY to MMU_CLEAR_SOFT_DIRTY.
- Renamed MMU_WRITE_PROTECT to MMU_KSM_WRITE_PROTECT.
- English syntax fixes.
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
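Taken together, the names used in this log and in the diff below suggest an
event enumeration along these lines. This is a reconstruction for reference,
not the authoritative definition from the series; the real enum likely
carries additional values (for example for the fork case mentioned under v6).

/*
 * Reconstructed from the event names appearing in this commit message
 * and diff; member set, order, and comments are illustrative only.
 */
enum mmu_event {
	MMU_MIGRATE,		/* pages replaced, address range stays valid */
	MMU_MUNMAP,		/* vma being destroyed, tracking can be freed */
	MMU_HUGE_PAGE_SPLIT,	/* huge page split into regular pages */
	MMU_CLEAR_SOFT_DIRTY,	/* write protection for soft dirty tracking */
	MMU_KSM_WRITE_PROTECT,	/* KSM write protecting a shared page */
};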
Diffstat (limited to 'mm/ksm.c')
-rw-r--r-- | mm/ksm.c | 18
1 file changed, 12 insertions, 6 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -872,7 +872,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 
 	mmun_start = addr;
 	mmun_end   = addr + PAGE_SIZE;
-	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end,
+					    MMU_KSM_WRITE_PROTECT);
 
 	ptep = page_check_address(page, mm, addr, &ptl, 0);
 	if (!ptep)
@@ -904,7 +905,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		if (pte_dirty(entry))
 			set_page_dirty(page);
 		entry = pte_mkclean(pte_wrprotect(entry));
-		set_pte_at_notify(mm, addr, ptep, entry);
+		set_pte_at_notify(mm, addr, ptep, entry, MMU_KSM_WRITE_PROTECT);
 	}
 	*orig_pte = *ptep;
 	err = 0;
@@ -912,7 +913,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 out_unlock:
 	pte_unmap_unlock(ptep, ptl);
 out_mn:
-	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end,
+					  MMU_KSM_WRITE_PROTECT);
 out:
 	return err;
 }
@@ -948,7 +950,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 
 	mmun_start = addr;
 	mmun_end   = addr + PAGE_SIZE;
-	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end,
+					    MMU_MIGRATE);
 
 	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	if (!pte_same(*ptep, orig_pte)) {
@@ -961,7 +964,9 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 
 	flush_cache_page(vma, addr, pte_pfn(*ptep));
 	ptep_clear_flush_notify(vma, addr, ptep);
-	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
+	set_pte_at_notify(mm, addr, ptep,
+			  mk_pte(kpage, vma->vm_page_prot),
+			  MMU_MIGRATE);
 
 	page_remove_rmap(page);
 	if (!page_mapped(page))
@@ -971,7 +976,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	pte_unmap_unlock(ptep, ptl);
 	err = 0;
 out_mn:
-	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end,
+					  MMU_MIGRATE);
 out:
 	return err;
 }
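The set_pte_at_notify() hunks above imply the event is also forwarded
through to each listener's ->change_pte() callback. A hypothetical listener
could use it to tell KSM write protection apart from page replacement;
again a sketch, with the callback's extra argument and
my_update_mirror_pte() assumed for illustration.

/*
 * Hypothetical ->change_pte() listener (not from this patch).  Assumes
 * the callback gains the same event argument that set_pte_at_notify()
 * now takes; my_update_mirror_pte() is an invented helper.
 */
static void my_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
			  unsigned long address, pte_t pte,
			  enum mmu_event event)
{
	struct my_mirror *mirror = container_of(mn, struct my_mirror, mn);
	bool new_page = (event == MMU_MIGRATE);

	/*
	 * MMU_KSM_WRITE_PROTECT: the same page now read-only, so only the
	 * permissions change.  MMU_MIGRATE: a different page now backs the
	 * address and the mirror must be repointed.
	 */
	my_update_mirror_pte(mirror, address, pte, new_page);
}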