commit c2231020 authored by Yang Shi, committed by Linus Torvalds

mm: thp: register mm for khugepaged when merging vma for shmem

When merging an anonymous vma, if the vma is large enough to fit at
least one hugepage, the mm is registered with khugepaged for collapsing
THP in the future (the size test is sketched below).
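
As a quick illustration of that size test, here is a minimal userspace sketch of the arithmetic khugepaged_enter_vma_merge() performs, assuming 2 MiB PMD-sized hugepages (the x86_64 default); the helper name vma_fits_hugepage() is hypothetical, not a kernel function:

#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_SIZE	(2UL << 20)		/* assumed 2 MiB PMD hugepages */
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

/* Round vm_start up and vm_end down to hugepage boundaries; the vma
 * can hold a hugepage only if an aligned span survives. */
static bool vma_fits_hugepage(unsigned long vm_start, unsigned long vm_end)
{
	unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	unsigned long hend = vm_end & HPAGE_PMD_MASK;

	return hstart < hend;
}

int main(void)
{
	/* 3 MiB vma starting 1 MiB past a boundary: the aligned span
	 * 2 MiB..4 MiB remains, so one hugepage fits. */
	printf("%d\n", vma_fits_hugepage(0x100000, 0x400000));	/* 1 */
	/* 2 MiB vma straddling a boundary: no aligned span survives. */
	printf("%d\n", vma_fits_hugepage(0x100000, 0x300000));	/* 0 */
	return 0;
}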

But it skips shmem vmas.  Do the same for shmem, though not for
file-private mappings, when merging a vma, in order to increase the
odds of collapsing a hugepage via khugepaged.

hugepage_vma_check() sounds like a good fit to do the check; the shmem
alignment test it performs is sketched below.  Move its definition
before khugepaged_enter_vma_merge() to avoid a build error.
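
For the shmem case, the alignment test in hugepage_vma_check() can be illustrated with the following sketch, assuming 4 KiB base pages and 2 MiB hugepages; shmem_vma_aligned() is a hypothetical wrapper, not a kernel function:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4 KiB base pages */
#define HPAGE_PMD_NR	512	/* base pages per 2 MiB hugepage */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* The vma's virtual start (in pages) and its file offset (vm_pgoff)
 * must differ by a multiple of HPAGE_PMD_NR, so hugepage boundaries
 * in the file line up with PMD boundaries in the address space. */
static bool shmem_vma_aligned(unsigned long vm_start, unsigned long vm_pgoff)
{
	return IS_ALIGNED((vm_start >> PAGE_SHIFT) - vm_pgoff, HPAGE_PMD_NR);
}

int main(void)
{
	/* File offset 0 mapped at a 2 MiB boundary: collapsible. */
	printf("%d\n", shmem_vma_aligned(0x200000, 0));	/* 1 */
	/* File offset of one page at the same address: boundaries
	 * disagree, so no PMD mapping is possible. */
	printf("%d\n", shmem_vma_aligned(0x200000, 1));	/* 0 */
	return 0;
}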

Link: http://lkml.kernel.org/r/1529697791-6950-1-git-send-email-yang.shi@linux.alibaba.com
Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8cded866
@@ -397,6 +397,25 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
 	return atomic_read(&mm->mm_users) == 0;
 }
 
+static bool hugepage_vma_check(struct vm_area_struct *vma)
+{
+	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+	    (vma->vm_flags & VM_NOHUGEPAGE) ||
+	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+		return false;
+	if (shmem_file(vma->vm_file)) {
+		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+			return false;
+		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+				HPAGE_PMD_NR);
+	}
+	if (!vma->anon_vma || vma->vm_ops)
+		return false;
+	if (is_vma_temporary_stack(vma))
+		return false;
+	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
+}
+
 int __khugepaged_enter(struct mm_struct *mm)
 {
 	struct mm_slot *mm_slot;
@@ -434,15 +453,14 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 			       unsigned long vm_flags)
 {
 	unsigned long hstart, hend;
-	if (!vma->anon_vma)
-		/*
-		 * Not yet faulted in so we will register later in the
-		 * page fault if needed.
-		 */
-		return 0;
-	if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
-		/* khugepaged not yet working on file or special mappings */
-		return 0;
+
+	/*
+	 * khugepaged does not yet work on non-shmem files or special
+	 * mappings. And file-private shmem THP is not supported.
+	 */
+	if (!hugepage_vma_check(vma))
+		return 0;
+
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
@@ -819,25 +837,6 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 }
 #endif
 
-static bool hugepage_vma_check(struct vm_area_struct *vma)
-{
-	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
-	    (vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		return false;
-	if (shmem_file(vma->vm_file)) {
-		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
-			return false;
-		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
-				HPAGE_PMD_NR);
-	}
-	if (!vma->anon_vma || vma->vm_ops)
-		return false;
-	if (is_vma_temporary_stack(vma))
-		return false;
-	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
-}
-
 /*
  * If mmap_sem temporarily dropped, revalidate vma
  * before taking mmap_sem.
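
For illustration only (not part of the commit), here is a hedged userspace sketch of a shmem mapping that the new check can pick up, assuming a kernel with CONFIG_TRANSPARENT_HUGE_PAGECACHE, khugepaged enabled, and glibc's memfd_create():

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define MAP_SIZE (4UL << 20)	/* room for two 2 MiB hugepages */

int main(void)
{
	/* A memfd is shmem-backed, like a tmpfs file. */
	int fd = memfd_create("thp-demo", 0);
	if (fd < 0 || ftruncate(fd, MAP_SIZE) < 0) {
		perror("memfd_create/ftruncate");
		return 1;
	}
	/* File offset 0: the vma passes the pgoff alignment test whenever
	 * the kernel places vm_start on a 2 MiB boundary. */
	char *p = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	madvise(p, MAP_SIZE, MADV_HUGEPAGE);	/* sets VM_HUGEPAGE */
	memset(p, 0, MAP_SIZE);			/* fault in base pages */
	pause();	/* give khugepaged a chance to collapse the range */
	return 0;
}

Whether khugepaged actually collapses the range also depends on the /sys/kernel/mm/transparent_hugepage/shmem_enabled setting and on the mapping landing at a hugepage-aligned address.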