Commit 69e68b4f authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: cleanup follow_page_mask()

Cleanups:
 - move pte-related code to separate function. It's about half of the
   function;
 - get rid of some goto-logic;
 - use 'return NULL' instead of 'return page' where page can only be
   NULL;
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent f2b495ca
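Before the diff itself, here is a minimal illustrative sketch of the pattern this cleanup applies (toy code with made-up names, not taken from the kernel): a shared goto error label is replaced by a small helper function, and call sites return early instead of jumping.

/* Toy example only -- names are hypothetical, not kernel code. */

/* Before: error paths funnel through a shared goto label. */
static int lookup_old(int key)
{
        int ret;

        if (key < 0)
                goto bad;
        if (key > 100)
                goto bad;
        ret = key * 2;
        return ret;
bad:
        return -1;              /* shared error path */
}

/* After: the shared error path becomes a helper and callers return early. */
static int lookup_failed(void)
{
        return -1;              /* one place to extend error handling later */
}

static int lookup_new(int key)
{
        if (key < 0)
                return lookup_failed();
        if (key > 100)
                return lookup_failed();
        return key * 2;
}

The patch below does the same thing at a larger scale: the shared no_page_table error path and the pte-level lookup become helpers, and follow_page_mask() returns early at each failure point.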
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -12,105 +12,35 @@

 #include "internal.h"

-/**
- * follow_page_mask - look up a page descriptor from a user-virtual address
- * @vma: vm_area_struct mapping @address
- * @address: virtual address to look up
- * @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
- *
- * @flags can have FOLL_ flags set, defined in <linux/mm.h>
- *
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
- * an error pointer if there is a mapping to something not represented
- * by a page descriptor (see also vm_normal_page()).
- */
-struct page *follow_page_mask(struct vm_area_struct *vma,
-                             unsigned long address, unsigned int flags,
-                             unsigned int *page_mask)
+static struct page *no_page_table(struct vm_area_struct *vma,
+               unsigned int flags)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ptep, pte;
-       spinlock_t *ptl;
-       struct page *page;
-       struct mm_struct *mm = vma->vm_mm;
-
-       *page_mask = 0;
-
-       page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
-       if (!IS_ERR(page)) {
-               BUG_ON(flags & FOLL_GET);
-               goto out;
-       }
-
-       page = NULL;
-       pgd = pgd_offset(mm, address);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto no_page_table;
-
-       pud = pud_offset(pgd, address);
-       if (pud_none(*pud))
-               goto no_page_table;
-       if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
-               if (flags & FOLL_GET)
-                       goto out;
-               page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
-               goto out;
-       }
-       if (unlikely(pud_bad(*pud)))
-               goto no_page_table;
-
-       pmd = pmd_offset(pud, address);
-       if (pmd_none(*pmd))
-               goto no_page_table;
-       if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
-               page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
-               if (flags & FOLL_GET) {
-                       /*
-                        * Refcount on tail pages are not well-defined and
-                        * shouldn't be taken. The caller should handle a NULL
-                        * return when trying to follow tail pages.
-                        */
-                       if (PageHead(page))
-                               get_page(page);
-                       else {
-                               page = NULL;
-                               goto out;
-                       }
-               }
-               goto out;
-       }
-       if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
-               goto no_page_table;
-       if (pmd_trans_huge(*pmd)) {
-               if (flags & FOLL_SPLIT) {
-                       split_huge_page_pmd(vma, address, pmd);
-                       goto split_fallthrough;
-               }
-               ptl = pmd_lock(mm, pmd);
-               if (likely(pmd_trans_huge(*pmd))) {
-                       if (unlikely(pmd_trans_splitting(*pmd))) {
-                               spin_unlock(ptl);
-                               wait_split_huge_page(vma->anon_vma, pmd);
-                       } else {
-                               page = follow_trans_huge_pmd(vma, address,
-                                                            pmd, flags);
-                               spin_unlock(ptl);
-                               *page_mask = HPAGE_PMD_NR - 1;
-                               goto out;
-                       }
-               } else
-                       spin_unlock(ptl);
-               /* fall through */
-       }
-split_fallthrough:
+       /*
+        * When core dumping an enormous anonymous area that nobody
+        * has touched so far, we don't want to allocate unnecessary pages or
+        * page tables.  Return error instead of NULL to skip handle_mm_fault,
+        * then get_dump_page() will return NULL to leave a hole in the dump.
+        * But we can only make this optimization where a hole would surely
+        * be zero-filled if handle_mm_fault() actually did handle it.
+        */
+       if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
+               return ERR_PTR(-EFAULT);
+       return NULL;
+}
+
+static struct page *follow_page_pte(struct vm_area_struct *vma,
+               unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       struct page *page;
+       spinlock_t *ptl;
+       pte_t *ptep, pte;
+
+retry:
        if (unlikely(pmd_bad(*pmd)))
-               goto no_page_table;
+               return no_page_table(vma, flags);

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!pte_present(pte)) {
                swp_entry_t entry;
@@ -128,12 +58,14 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
                        goto no_page;
                pte_unmap_unlock(ptep, ptl);
                migration_entry_wait(mm, pmd, address);
-               goto split_fallthrough;
+               goto retry;
        }
        if ((flags & FOLL_NUMA) && pte_numa(pte))
                goto no_page;
-       if ((flags & FOLL_WRITE) && !pte_write(pte))
-               goto unlock;
+       if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+               pte_unmap_unlock(ptep, ptl);
+               return NULL;
+       }

        page = vm_normal_page(vma, address, pte);
        if (unlikely(!page)) {
@@ -178,11 +110,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
                        unlock_page(page);
                }
        }
-unlock:
        pte_unmap_unlock(ptep, ptl);
-out:
        return page;
-
 bad_page:
        pte_unmap_unlock(ptep, ptl);
        return ERR_PTR(-EFAULT);
@@ -190,21 +119,99 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
-               return page;
-
-no_page_table:
-       /*
-        * When core dumping an enormous anonymous area that nobody
-        * has touched so far, we don't want to allocate unnecessary pages or
-        * page tables.  Return error instead of NULL to skip handle_mm_fault,
-        * then get_dump_page() will return NULL to leave a hole in the dump.
-        * But we can only make this optimization where a hole would surely
-        * be zero-filled if handle_mm_fault() actually did handle it.
-        */
-       if ((flags & FOLL_DUMP) &&
-           (!vma->vm_ops || !vma->vm_ops->fault))
-               return ERR_PTR(-EFAULT);
-       return page;
+               return NULL;
+       return no_page_table(vma, flags);
+}
+
+/**
+ * follow_page_mask - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ * @page_mask: on output, *page_mask is set according to the size of the page
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
+ */
+struct page *follow_page_mask(struct vm_area_struct *vma,
+                             unsigned long address, unsigned int flags,
+                             unsigned int *page_mask)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       spinlock_t *ptl;
+       struct page *page;
+       struct mm_struct *mm = vma->vm_mm;
+
+       *page_mask = 0;
+
+       page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
+       if (!IS_ERR(page)) {
+               BUG_ON(flags & FOLL_GET);
+               return page;
+       }
+
+       pgd = pgd_offset(mm, address);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               return no_page_table(vma, flags);
+
+       pud = pud_offset(pgd, address);
+       if (pud_none(*pud))
+               return no_page_table(vma, flags);
+       if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+               if (flags & FOLL_GET)
+                       return NULL;
+               page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
+               return page;
+       }
+       if (unlikely(pud_bad(*pud)))
+               return no_page_table(vma, flags);
+
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd))
+               return no_page_table(vma, flags);
+       if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
+               page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
+               if (flags & FOLL_GET) {
+                       /*
+                        * Refcount on tail pages are not well-defined and
+                        * shouldn't be taken. The caller should handle a NULL
+                        * return when trying to follow tail pages.
+                        */
+                       if (PageHead(page))
+                               get_page(page);
+                       else
+                               page = NULL;
+               }
+               return page;
+       }
+       if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+               return no_page_table(vma, flags);
+       if (pmd_trans_huge(*pmd)) {
+               if (flags & FOLL_SPLIT) {
+                       split_huge_page_pmd(vma, address, pmd);
+                       return follow_page_pte(vma, address, pmd, flags);
+               }
+               ptl = pmd_lock(mm, pmd);
+               if (likely(pmd_trans_huge(*pmd))) {
+                       if (unlikely(pmd_trans_splitting(*pmd))) {
+                               spin_unlock(ptl);
+                               wait_split_huge_page(vma->anon_vma, pmd);
+                       } else {
+                               page = follow_trans_huge_pmd(vma, address,
+                                                            pmd, flags);
+                               spin_unlock(ptl);
+                               *page_mask = HPAGE_PMD_NR - 1;
+                               return page;
+                       }
+               } else
+                       spin_unlock(ptl);
+       }
+       return follow_page_pte(vma, address, pmd, flags);
 }

 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
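For context on how the refactored follow_page_mask() is typically consumed: callers that do not care about the page size usually go through a small wrapper that discards the reported mask. The sketch below follows the kernel's follow_page() convention of that era; it is reproduced here as an illustration and is not part of this commit.

/*
 * Illustrative wrapper (not part of this patch): a caller that does not
 * need the page-size information can simply discard the mask that
 * follow_page_mask() reports through its last argument.
 */
static inline struct page *follow_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int foll_flags)
{
        unsigned int unused_page_mask;

        return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}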