Commit 84d33df2 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: rename FOLL_MLOCK to FOLL_POPULATE

After commit a1fde08c ("VM: skip the stack guard page lookup in
get_user_pages only for mlock"), FOLL_MLOCK has lost its original
meaning: we don't necessarily mlock the page when the flag is set -- we
also take VM_LOCKED into consideration.

Since we use the same codepath for __mm_populate(), let's rename
FOLL_MLOCK to FOLL_POPULATE.
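
For illustration, a minimal userspace sketch (not the kernel code itself; the
flag values are copied from include/linux/mm.h around the time of this commit,
and gup_action() is a hypothetical helper) of the semantics described above:
FOLL_POPULATE alone only requests that the page be faulted in, and mlocking
additionally requires VM_LOCKED on the VMA.

#include <stdio.h>

/* Flag values as defined in include/linux/mm.h around this commit. */
#define FOLL_TOUCH	0x02
#define FOLL_POPULATE	0x40		/* fault in page */
#define VM_LOCKED	0x00002000

/*
 * Hypothetical helper, for illustration only: mirrors the check that
 * follow_page_pte() and follow_trans_huge_pmd() perform -- a page is
 * mlocked only when both FOLL_POPULATE and VM_LOCKED are set.
 */
static const char *gup_action(unsigned int gup_flags, unsigned long vm_flags)
{
	if ((gup_flags & FOLL_POPULATE) && (vm_flags & VM_LOCKED))
		return "fault in and mlock";
	if (gup_flags & FOLL_POPULATE)
		return "fault in only";
	return "no populate requested";
}

int main(void)
{
	/* mlock(2) path: __mlock_vma_pages_range() sets both conditions. */
	printf("%s\n", gup_action(FOLL_TOUCH | FOLL_POPULATE, VM_LOCKED));
	/* __mm_populate() on a non-mlocked VMA: populate without mlocking. */
	printf("%s\n", gup_action(FOLL_TOUCH | FOLL_POPULATE, 0));
	return 0;
}
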
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent c21a6daf
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2109,7 +2109,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
 #define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
-#define FOLL_MLOCK	0x40	/* mark page as mlocked */
+#define FOLL_POPULATE	0x40	/* fault in page */
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -92,7 +92,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		 */
 		mark_page_accessed(page);
 	}
-	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * The preliminary mapping check is mainly to avoid the
 		 * pointless overhead of lock_page on the ZERO_PAGE
@@ -265,8 +265,8 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	unsigned int fault_flags = 0;
 	int ret;
 
-	/* For mlock, just skip the stack guard page. */
-	if ((*flags & FOLL_MLOCK) &&
+	/* For mm_populate(), just skip the stack guard page. */
+	if ((*flags & FOLL_POPULATE) &&
 	    (stack_guard_page_start(vma, address) ||
 	     stack_guard_page_end(vma, address + PAGE_SIZE)))
 		return -ENOENT;
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1231,7 +1231,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 					  pmd, _pmd,  1))
 			update_mmu_cache_pmd(vma, addr, pmd);
 	}
-	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
 		if (page->mapping && trylock_page(page)) {
 			lru_add_drain();
 			if (page->mapping)
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -237,7 +237,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
 	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
-	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
+	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW