Commit 77bf45e7 authored by Kirill A. Shutemov, committed by Linus Torvalds

mempolicy: do not try to queue pages from !vma_migratable()

Maybe I'm missing something, but I don't see a reason why we try to queue
pages from non-migratable VMAs.

This testcase steps on VM_BUG_ON_PAGE() in isolate_lru_page():

    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <numaif.h>

    #define SIZE 0x2000

    int foo;

    int main()
    {
        int fd;
        char *p;
        unsigned long mask = 2;

        fd = open("/dev/sg0", O_RDWR);
        p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        /* Fault in the pages */
        foo = p[0] + p[0x1000];
        mbind(p, SIZE, MPOL_BIND, &mask, 4, MPOL_MF_MOVE | MPOL_MF_STRICT);
        return 0;
    }
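Reproduction note (an editor's addition, not part of the original message):
mbind() here is the wrapper declared in numaif.h, so the test builds against
libnuma, e.g. gcc test.c -lnuma, and the open() assumes a readable and
writable SCSI generic device node at /dev/sg0.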

The only case where we can queue pages from such a VMA is MPOL_MF_STRICT
plus MPOL_MF_MOVE or MPOL_MF_MOVE_ALL, for a VMA which has pages on the LRU
but whose gfp mask is not suitable for migration (see the mapping_gfp_mask()
check in vma_migratable()).  That looks like a bug to me.
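For reference, vma_migratable() at the time looked roughly like the sketch
below (an editor's paraphrase of include/linux/mempolicy.h from this era,
not part of the original commit message); the mapping_gfp_mask() check
mentioned above is the final test:

    /* Can pages in this VMA be migrated at all? */
    static inline int vma_migratable(struct vm_area_struct *vma)
    {
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
            return 0;
    #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        if (vma->vm_flags & (VM_HUGETLB | VM_MIXEDMAP))
            return 0;
    #endif
        /*
         * Migration allocates pages in the highest zone; if the mapping's
         * gfp mask cannot reach that zone, node-to-node migration is not
         * possible.  This is the check referred to above.
         */
        if (vma->vm_file &&
            gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone)
            return 0;
        return 1;
    }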

Let's filter out non-migratable VMAs at the start of queue_pages_test_walk()
and go to queue_pages_pte_range() only if the MPOL_MF_MOVE or
MPOL_MF_MOVE_ALL flag is set.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 564e81a5
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -548,8 +548,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			goto retry;
 		}
 
-		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
-			migrate_page_add(page, qp->pagelist, flags);
+		migrate_page_add(page, qp->pagelist, flags);
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
@@ -625,7 +624,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 	unsigned long endvma = vma->vm_end;
 	unsigned long flags = qp->flags;
 
-	if (vma->vm_flags & VM_PFNMAP)
+	if (!vma_migratable(vma))
 		return 1;
 
 	if (endvma > end)
@@ -644,16 +643,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 
 	if (flags & MPOL_MF_LAZY) {
 		/* Similar to task_numa_work, skip inaccessible VMAs */
-		if (vma_migratable(vma) &&
-			vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
+		if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
 			change_prot_numa(vma, start, endvma);
 		return 1;
 	}
 
-	if ((flags & MPOL_MF_STRICT) ||
-	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
-	     vma_migratable(vma)))
-		/* queue pages from current vma */
-		return 0;
+	/* queue pages from current vma */
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+		return 0;
 	return 1;
 }
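In short: queue_pages_test_walk() now rejects every !vma_migratable() VMA up
front, the walk descends into queue_pages_pte_range() only when MPOL_MF_MOVE
or MPOL_MF_MOVE_ALL is set, and migrate_page_add() no longer needs its own
flag check.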