| author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2016-07-26 15:26:21 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-26 16:19:19 -0700 |
| commit | 657e3038c4e6fcd3cef41f2b01c655a685a7b8c7 | |
| tree | ad015c70c6a1befac182b9113ac4d27cf94dadb7 /mm/huge_memory.c | |
| parent | 800d8c63b2e989c2e349632d1648119bf5862f01 | |
shmem, thp: respect MADV_{NO,}HUGEPAGE for file mappings
Let's wire up existing madvise() hugepage hints for file mappings.
MADV_HUGEPAGE advises shmem to allocate a huge page on page fault in the
VMA. It only has an effect if the filesystem is mounted with huge=advise
or huge=within_size.
MADV_NOHUGEPAGE prevents a huge page from being allocated on page fault
in the VMA. It doesn't prevent a huge page from being allocated by other
means, e.g. a page fault into a different mapping of the same file or a
write(2) into the file.
Link: http://lkml.kernel.org/r/1466021202-61880-31-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
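Concretely, the two hints look like this from userspace. A minimal
sketch, assuming a tmpfs mounted with huge=advise at /mnt/huge (the
mount point and file name are illustrative; error handling is trimmed):

```c
/* Assumes: mount -t tmpfs -o huge=advise none /mnt/huge */
#include <fcntl.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 4UL << 20;         /* two 2M huge pages on x86-64 */
        int fd = open("/mnt/huge/file", O_CREAT | O_RDWR, 0600);

        if (fd < 0 || ftruncate(fd, len))
                return EXIT_FAILURE;

        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return EXIT_FAILURE;

        /* Ask shmem to back faults in this VMA with huge pages. */
        madvise(p, len, MADV_HUGEPAGE);
        p[0] = 1;                       /* fault: may now get a huge page */

        /* Or forbid huge pages for this VMA only; other mappings of the
         * same file, or write(2) into it, may still get huge pages. */
        madvise(p, len, MADV_NOHUGEPAGE);

        munmap(p, len);
        close(fd);
        return EXIT_SUCCESS;
}
```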
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r-- | mm/huge_memory.c | 19 |
1 file changed, 5 insertions(+), 14 deletions(-)
```diff
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0f58460cd69c..5eba97874ad5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1830,7 +1830,7 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
         return NULL;
 }
 
-#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
+#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
 
 int hugepage_madvise(struct vm_area_struct *vma,
                      unsigned long *vm_flags, int advice)
@@ -1846,11 +1846,6 @@ int hugepage_madvise(struct vm_area_struct *vma,
                 if (mm_has_pgste(vma->vm_mm))
                         return 0;
 #endif
-                /*
-                 * Be somewhat over-protective like KSM for now!
-                 */
-                if (*vm_flags & VM_NO_THP)
-                        return -EINVAL;
                 *vm_flags &= ~VM_NOHUGEPAGE;
                 *vm_flags |= VM_HUGEPAGE;
                 /*
@@ -1858,15 +1853,11 @@ int hugepage_madvise(struct vm_area_struct *vma,
                  * register it here without waiting a page fault that
                  * may not happen any time soon.
                  */
-                if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
+                if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
+                                khugepaged_enter_vma_merge(vma, *vm_flags))
                         return -ENOMEM;
                 break;
         case MADV_NOHUGEPAGE:
-                /*
-                 * Be somewhat over-protective like KSM for now!
-                 */
-                if (*vm_flags & VM_NO_THP)
-                        return -EINVAL;
                 *vm_flags &= ~VM_HUGEPAGE;
                 *vm_flags |= VM_NOHUGEPAGE;
                 /*
@@ -1974,7 +1965,7 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                  * page fault if needed.
                  */
                 return 0;
-        if (vma->vm_ops || (vm_flags & VM_NO_THP))
+        if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
                 /* khugepaged not yet working on file or special mappings */
                 return 0;
         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
@@ -2366,7 +2357,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
                 return false;
         if (is_vma_temporary_stack(vma))
                 return false;
-        return !(vma->vm_flags & VM_NO_THP);
+        return !(vma->vm_flags & VM_NO_KHUGEPAGED);
 }
 
 /*
```
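Taken together, the hunks change hugepage_madvise() from rejecting
shared and special VMAs with -EINVAL to always recording the hint, and
gate only the khugepaged registration on the renamed VM_NO_KHUGEPAGED
mask. A standalone sketch of that control flow, using made-up stand-in
flag bits rather than the kernel's real VM_* values:

```c
#include <stdio.h>

/* Stand-in flag bits for illustration; not the kernel's values. */
#define VM_SHARED        0x01UL
#define VM_MAYSHARE      0x02UL
#define VM_HUGETLB       0x04UL
#define VM_SPECIAL       0x08UL
#define VM_HUGEPAGE      0x10UL
#define VM_NOHUGEPAGE    0x20UL

/* The renamed mask no longer blocks the hint, only khugepaged. */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)

static int madv_hugepage_sketch(unsigned long *vm_flags)
{
        /* After the patch the hint is always recorded ... */
        *vm_flags &= ~VM_NOHUGEPAGE;
        *vm_flags |= VM_HUGEPAGE;

        /* ... and shared/special VMAs merely skip khugepaged
         * registration instead of failing with -EINVAL. */
        if (!(*vm_flags & VM_NO_KHUGEPAGED))
                printf("register VMA with khugepaged\n");
        else
                printf("hint recorded; khugepaged skips this VMA\n");
        return 0;
}

int main(void)
{
        /* A shared shmem mapping: madvise(MADV_HUGEPAGE) used to return
         * -EINVAL here; now the hint sticks so shmem can honor it at
         * fault time. */
        unsigned long shmem_vma_flags = VM_SHARED | VM_MAYSHARE;

        madv_hugepage_sketch(&shmem_vma_flags);
        return 0;
}
```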