Commit 95042f9e authored by Linus Torvalds

vm: fix mlock() on stack guard page

Commit 53a7706d ("mlock: do not hold mmap_sem for extended periods
of time") changed mlock() to care about the exact number of pages that
__get_user_pages() had brought it.  Before, it would only care about
errors.

And that doesn't work, because we also handled one page specially in
__mlock_vma_pages_range(), namely the stack guard page.  So when that
case was handled, the number of pages that the function returned was off
by one.  In particular, it could be zero, and then the caller would end
up not making any progress at all.
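To make the failure mode concrete, here is a freestanding C sketch of that calling pattern. It is not the kernel code: pin_pages() is a hypothetical stand-in for __get_user_pages() with the guard page mis-accounted the way __mlock_vma_pages_range() did it.

    #include <stdio.h>

    /*
     * Hypothetical stand-in for __get_user_pages(): the range's first
     * page plays the stack guard page, which is skipped without being
     * counted.
     */
    static long pin_pages(long start, long nr_pages)
    {
            long pinned = 0;

            if (start == 0)         /* "guard page": silently skipped */
                    nr_pages--;     /* ...but not reported as progress */
            while (nr_pages-- > 0)
                    pinned++;
            return pinned;          /* off by one versus what was asked */
    }

    int main(void)
    {
            long nr_pages = 1, done = 0;

            /* The caller only advances by the count the callee reports. */
            while (done < nr_pages) {
                    long ret = pin_pages(done, nr_pages - done);
                    if (ret <= 0)
                            break;  /* without this, the loop spins forever */
                    done += ret;
            }
            printf("made progress on %ld of %ld pages\n", done, nr_pages);
            return 0;
    }

With a one-page range starting at the guard page, pin_pages() reports 0, so a caller that retries until the whole range is covered makes no progress at all.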

Rather than try to fix up that off-by-one error for the mlock case
specially, this just moves the logic to handle the stack guard page
into __get_user_pages() itself, thus making all the counts come out
right automatically.
Reported-by: Robert Święcki <robert@swiecki.net>
Cc: Hugh Dickins <hughd@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent be85bcca
mm/memory.c:

@@ -1410,6 +1410,13 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 	return page;
 }
 
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_stack_continue(vma->vm_prev, addr);
+}
+
 /**
  * __get_user_pages() - pin user pages in memory
  * @tsk:	task_struct of target task
@@ -1488,7 +1495,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		vma = find_extend_vma(mm, start);
 		if (!vma && in_gate_area(mm, start)) {
 			unsigned long pg = start & PAGE_MASK;
-			struct vm_area_struct *gate_vma = get_gate_vma(mm);
 			pgd_t *pgd;
 			pud_t *pud;
 			pmd_t *pmd;
@@ -1513,10 +1519,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				pte_unmap(pte);
 				return i ? : -EFAULT;
 			}
+			vma = get_gate_vma(mm);
 			if (pages) {
 				struct page *page;
 
-				page = vm_normal_page(gate_vma, start, *pte);
+				page = vm_normal_page(vma, start, *pte);
 				if (!page) {
 					if (!(gup_flags & FOLL_DUMP) &&
 					     is_zero_pfn(pte_pfn(*pte)))
@@ -1530,12 +1537,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				get_page(page);
 			}
 			pte_unmap(pte);
-			if (vmas)
-				vmas[i] = gate_vma;
-			i++;
-			start += PAGE_SIZE;
-			nr_pages--;
-			continue;
+			goto next_page;
 		}
 
 		if (!vma ||
@@ -1549,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			continue;
 		}
 
+		/*
+		 * If we don't actually want the page itself,
+		 * and it's the stack guard page, just skip it.
+		 */
+		if (!pages && stack_guard_page(vma, start))
+			goto next_page;
+
 		do {
 			struct page *page;
 			unsigned int foll_flags = gup_flags;
@@ -1631,6 +1640,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 					flush_anon_page(vma, page, start);
 				flush_dcache_page(page);
 			}
+next_page:
 			if (vmas)
 				vmas[i] = vma;
 			i++;
...
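Condensed, this is the accounting that the next_page label buys: whether a page is pinned, is the gate page, or is a skipped guard page, the same i/start/nr_pages bookkeeping runs, so the return value always matches the range actually consumed. A simplified, freestanding sketch (mock logic, not the kernel's):

    #include <stdio.h>

    #define PAGE_SIZE 4096L

    static int is_guard_page(long addr)     /* mock of stack_guard_page() */
    {
            return addr == 0;               /* pretend page 0 is the guard page */
    }

    static long gup_sketch(long start, long nr_pages, void **pages)
    {
            long i = 0;

            while (nr_pages > 0) {
                    if (!pages && is_guard_page(start))
                            goto next_page; /* skip the page itself... */
                    /* ...otherwise fault in and pin the page here */
    next_page:
                    i++;                    /* ...but count it either way, */
                    start += PAGE_SIZE;     /* so the caller sees honest   */
                    nr_pages--;             /* progress                    */
            }
            return i;
    }

    int main(void)
    {
            /* One-page range starting at the guard page: reports 1, not 0. */
            printf("pages accounted: %ld\n", gup_sketch(0, 1, NULL));
            return 0;
    }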
mm/mlock.c:

@@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page)
 	}
 }
 
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_stack_continue(vma->vm_prev, addr);
-}
-
 /**
  * __mlock_vma_pages_range() - mlock a range of pages in the vma.
  * @vma:   target vma
@@ -188,12 +181,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (vma->vm_flags & VM_LOCKED)
 		gup_flags |= FOLL_MLOCK;
 
-	/* We don't try to access the guard page of a stack vma */
-	if (stack_guard_page(vma, start)) {
-		addr += PAGE_SIZE;
-		nr_pages--;
-	}
-
 	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
 				NULL, NULL, nonblocking);
 }
...
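The stack_guard_page() predicate being moved is small enough to exercise in isolation. Below is a freestanding illustration with mock types; struct vma and vma_stack_continue() are simplified stand-ins for the kernel's struct vm_area_struct and its helper. A vma is sitting on its guard page when it grows downward, the address is its lowest page, and the previous vma does not continue the same stack.

    #include <stdio.h>

    #define VM_GROWSDOWN 0x0100UL
    #define PAGE_SIZE    4096UL

    struct vma {                            /* mock of struct vm_area_struct */
            unsigned long vm_flags;
            unsigned long vm_start;
            unsigned long vm_end;
            struct vma *vm_prev;
    };

    /*
     * Mock of vma_stack_continue(): a growsdown vma ending exactly at addr
     * means the page at addr belongs to a larger, split stack, so it is
     * not a guard page.
     */
    static int vma_stack_continue(struct vma *prev, unsigned long addr)
    {
            return prev && (prev->vm_flags & VM_GROWSDOWN) &&
                   prev->vm_end == addr;
    }

    static int stack_guard_page(struct vma *vma, unsigned long addr)
    {
            return (vma->vm_flags & VM_GROWSDOWN) &&        /* stack-style vma */
                   (vma->vm_start == addr) &&               /* its lowest page */
                   !vma_stack_continue(vma->vm_prev, addr); /* no stack below  */
    }

    int main(void)
    {
            struct vma stack = {
                    .vm_flags = VM_GROWSDOWN,
                    .vm_start = 0x7f0000000000UL,
                    .vm_end   = 0x7f0000100000UL,
                    .vm_prev  = NULL,
            };

            printf("at vm_start:    %d\n",
                   stack_guard_page(&stack, stack.vm_start));              /* 1 */
            printf("one page above: %d\n",
                   stack_guard_page(&stack, stack.vm_start + PAGE_SIZE));  /* 0 */
            return 0;
    }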