diff --git a/mm/mlock.c b/mm/mlock.c
index 25cc9e88c540c360cadbe2e9dca50dea74e0bce5..84da66b7bbf0db6bf18898593afdc32fc424dd80 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -169,7 +169,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end > vma->vm_end);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
+	gup_flags = FOLL_TOUCH;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW
@@ -178,6 +178,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
 		gup_flags |= FOLL_WRITE;
 
+	if (vma->vm_flags & VM_LOCKED)
+		gup_flags |= FOLL_MLOCK;
+
 	/* We don't try to access the guard page of a stack vma */
 	if (stack_guard_page(vma, start)) {
 		addr += PAGE_SIZE;
@@ -456,18 +459,15 @@ static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
 		/*
 		 * Now fault in a range of pages within the first VMA.
 		 */
-		if (vma->vm_flags & VM_LOCKED) {
-			ret = __mlock_vma_pages_range(vma, nstart, nend);
-			if (ret < 0 && ignore_errors) {
-				ret = 0;
-				continue;	/* continue at next VMA */
-			}
-			if (ret) {
-				ret = __mlock_posix_error_return(ret);
-				break;
-			}
-		} else
-			make_pages_present(nstart, nend);
+		ret = __mlock_vma_pages_range(vma, nstart, nend);
+		if (ret < 0 && ignore_errors) {
+			ret = 0;
+			continue;	/* continue at next VMA */
+		}
+		if (ret) {
+			ret = __mlock_posix_error_return(ret);
+			break;
+		}
 	}
 	up_read(&mm->mmap_sem);
 	return ret;	/* 0 or negative error code */
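
The net effect of the first two hunks is that __mlock_vma_pages_range() now decides per-vma whether to request FOLL_MLOCK, instead of assuming every caller wants mlocked pages; the third hunk lets do_mlock_pages() call it unconditionally. Below is a minimal user-space sketch of that flag-selection logic, not kernel code: the VM_* and FOLL_* values and the pick_gup_flags() helper are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's flag bits; values are arbitrary. */
#define VM_WRITE	0x1UL
#define VM_SHARED	0x2UL
#define VM_LOCKED	0x4UL

#define FOLL_TOUCH	0x1U
#define FOLL_WRITE	0x2U
#define FOLL_MLOCK	0x4U

/*
 * Mirrors the selection performed in __mlock_vma_pages_range() after this
 * patch: FOLL_TOUCH always, FOLL_WRITE only for private writable mappings
 * (to break COW), and FOLL_MLOCK only when the vma is actually VM_LOCKED.
 */
static unsigned int pick_gup_flags(unsigned long vm_flags)
{
	unsigned int gup_flags = FOLL_TOUCH;

	if ((vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	if (vm_flags & VM_LOCKED)
		gup_flags |= FOLL_MLOCK;

	return gup_flags;
}

int main(void)
{
	printf("locked private writable:  %#x\n",
	       pick_gup_flags(VM_LOCKED | VM_WRITE));
	printf("unlocked shared writable: %#x\n",
	       pick_gup_flags(VM_SHARED | VM_WRITE));
	return 0;
}

With the VM_LOCKED check moved here, the second case simply omits FOLL_MLOCK, which is why do_mlock_pages() no longer needs its own VM_LOCKED branch or the make_pages_present() fallback.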