Commit 8fde12ca authored by Linus Torvalds

mm: prevent get_user_pages() from overflowing page refcount

If the page refcount wraps around past zero, it will be freed while
there are still four billion references to it.  One of the possible
avenues for an attacker to try to make this happen is by doing direct IO
on a page multiple times.  This patch makes get_user_pages() refuse to
take a new page reference if there are already more than two billion
references to the page.

Reported-by: Jann Horn <jannh@google.com>
Acked-by: Matthew Wilcox <willy@infradead.org>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 88b1a17d
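Editor's note: the sketch below is an illustrative, userspace-only rendering of the saturation idea this patch relies on; the struct and function names are hypothetical and do not exist in the kernel. A 32-bit page refcount pushed past INT_MAX goes negative when read as a signed integer, so the helpers used in the diff (try_get_page() and the new try_get_compound_head()) treat a negative count as "too many references" and refuse to pin the page rather than risk wrapping the count back to zero.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct page; the kernel uses atomic_t. */
struct fake_page {
	int refcount;
};

/* Refuse to take a reference once the count has gone negative. */
static bool try_get_fake_page(struct fake_page *page)
{
	if (page->refcount < 0)
		return false;	/* the real code fails GUP with -ENOMEM */
	page->refcount++;
	return true;
}

int main(void)
{
	struct fake_page normal = { .refcount = 1 };
	/* Simulate a count already pushed past 2^31 by unchecked gets. */
	struct fake_page saturated = { .refcount = INT_MIN };

	printf("normal page pinned:    %s\n",
	       try_get_fake_page(&normal) ? "yes" : "no");
	printf("saturated page pinned: %s\n",
	       try_get_fake_page(&saturated) ? "yes" : "no");
	return 0;
}

With the checks in the diff below in place, a caller that somehow accumulates on the order of two billion references starts seeing -ENOMEM from get_user_pages() instead of silently wrapping the refcount.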
@@ -157,8 +157,12 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		goto retry;
 	}
 
-	if (flags & FOLL_GET)
-		get_page(page);
+	if (flags & FOLL_GET) {
+		if (unlikely(!try_get_page(page))) {
+			page = ERR_PTR(-ENOMEM);
+			goto out;
+		}
+	}
 	if (flags & FOLL_TOUCH) {
 		if ((flags & FOLL_WRITE) &&
 		    !pte_dirty(pte) && !PageDirty(page))
@@ -295,7 +299,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 			if (pmd_trans_unstable(pmd))
 				ret = -EBUSY;
 		} else {
-			get_page(page);
+			if (unlikely(!try_get_page(page))) {
+				spin_unlock(ptl);
+				return ERR_PTR(-ENOMEM);
+			}
 			spin_unlock(ptl);
 			lock_page(page);
 			ret = split_huge_page(page);
@@ -497,7 +504,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
 		if (is_device_public_page(*page))
 			goto unmap;
 	}
-	get_page(*page);
+	if (unlikely(!try_get_page(*page))) {
+		ret = -ENOMEM;
+		goto unmap;
+	}
 out:
 	ret = 0;
 unmap:
@@ -1393,6 +1403,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
 	}
 }
 
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+	struct page *head = compound_head(page);
+	if (WARN_ON_ONCE(page_ref_count(head) < 0))
+		return NULL;
+	if (unlikely(!page_cache_add_speculative(head, refs)))
+		return NULL;
+	return head;
+}
+
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 			 int write, struct page **pages, int *nr)
@@ -1427,9 +1451,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
-		head = compound_head(page);
 
-		if (!page_cache_get_speculative(head))
+		head = try_get_compound_head(page, 1);
+		if (!head)
 			goto pte_unmap;
 
 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1568,8 +1592,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 		refs++;
 	} while (addr += PAGE_SIZE, addr != end);
 
-	head = compound_head(pmd_page(orig));
-	if (!page_cache_add_speculative(head, refs)) {
+	head = try_get_compound_head(pmd_page(orig), refs);
+	if (!head) {
 		*nr -= refs;
 		return 0;
 	}
@@ -1606,8 +1630,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
 		refs++;
 	} while (addr += PAGE_SIZE, addr != end);
 
-	head = compound_head(pud_page(orig));
-	if (!page_cache_add_speculative(head, refs)) {
+	head = try_get_compound_head(pud_page(orig), refs);
+	if (!head) {
 		*nr -= refs;
 		return 0;
 	}
@@ -1643,8 +1667,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
 		refs++;
 	} while (addr += PAGE_SIZE, addr != end);
 
-	head = compound_head(pgd_page(orig));
-	if (!page_cache_add_speculative(head, refs)) {
+	head = try_get_compound_head(pgd_page(orig), refs);
+	if (!head) {
 		*nr -= refs;
 		return 0;
 	}
......
@@ -4298,6 +4298,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
 		page = pte_page(huge_ptep_get(pte));
+
+		/*
+		 * Instead of doing 'try_get_page()' below in the same_page
+		 * loop, just check the count once here.
+		 */
+		if (unlikely(page_count(page) <= 0)) {
+			if (pages) {
+				spin_unlock(ptl);
+				remainder = 0;
+				err = -ENOMEM;
+				break;
+			}
+		}
 same_page:
 		if (pages) {
 			pages[i] = mem_map_offset(page, pfn_offset);
......