Commit 86e5216f authored by Adam Litke, committed by Linus Torvalds

[PATCH] Hugetlb: Reorganize hugetlb_fault to prepare for COW

This patch splits the "no_page()" type activity into its own function,
hugetlb_no_page().  hugetlb_fault() becomes the entry point for hugetlb faults
and delegates to the appropriate handler depending on the type of fault.
Right now we still have only hugetlb_no_page() but a later patch introduces a
COW fault.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "Seth, Rohit" <rohit.seth@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 85ef47f7
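For orientation, the dispatch shape this patch establishes looks like the sketch below. The hugetlb_cow() branch is hypothetical: this commit only wires up the hugetlb_no_page() path, and the COW handler arrives in a later patch, so its name and signature here are assumptions rather than code from this commit.

/*
 * Sketch of hugetlb_fault() as a fault dispatcher (not verbatim kernel
 * code).  hugetlb_cow() is a hypothetical placeholder for the handler
 * a later patch introduces.
 */
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, int write_access)
{
	pte_t *ptep = huge_pte_alloc(mm, address);	/* find or allocate the huge pte slot */

	if (!ptep)
		return VM_FAULT_OOM;

	if (pte_none(*ptep))				/* no page mapped yet */
		return hugetlb_no_page(mm, vma, address, ptep);

	if (write_access && !pte_write(*ptep))		/* write to a read-only pte */
		return hugetlb_cow(mm, vma, address, ptep);	/* hypothetical, later patch */

	/* Another thread instantiated the pte before our test. */
	return VM_FAULT_MINOR;
}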
@@ -376,20 +376,15 @@ static struct page *find_or_alloc_huge_page(struct address_space *mapping,
 	return page;
 }
 
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access)
+int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pte_t *ptep)
 {
 	int ret = VM_FAULT_SIGBUS;
 	unsigned long idx;
 	unsigned long size;
-	pte_t *pte;
 	struct page *page;
 	struct address_space *mapping;
 
-	pte = huge_pte_alloc(mm, address);
-	if (!pte)
-		goto out;
-
 	mapping = vma->vm_file->f_mapping;
 	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
 		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
@@ -408,11 +403,11 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto backout;
 
 	ret = VM_FAULT_MINOR;
-	if (!pte_none(*pte))
+	if (!pte_none(*ptep))
 		goto backout;
 
 	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
-	set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page));
+	set_huge_pte_at(mm, address, ptep, make_huge_pte(vma, page));
 	spin_unlock(&mm->page_table_lock);
 	unlock_page(page);
 out:
@@ -426,6 +421,27 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	goto out;
 }
 
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	pte_t *ptep;
+	pte_t entry;
+
+	ptep = huge_pte_alloc(mm, address);
+	if (!ptep)
+		return VM_FAULT_OOM;
+
+	entry = *ptep;
+	if (pte_none(entry))
+		return hugetlb_no_page(mm, vma, address, ptep);
+
+	/*
+	 * We could get here if another thread instantiated the pte
+	 * before the test above.
+	 */
+	return VM_FAULT_MINOR;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
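The file index computed in hugetlb_no_page() is worth unpacking: vm_pgoff is kept in units of base pages, so it must be shifted down by HPAGE_SHIFT - PAGE_SHIFT to express it in huge pages before adding the huge-page offset within the mapping. A standalone arithmetic check, assuming 2MB huge pages (HPAGE_SHIFT = 21) and 4KB base pages; the kernel's actual values are architecture-specific:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed: 4KB base pages */
#define HPAGE_SHIFT	21	/* assumed: 2MB huge pages */

int main(void)
{
	unsigned long vm_start = 0x40000000UL;		/* mapping starts here */
	unsigned long vm_pgoff = 1024;			/* file offset: 1024 * 4KB = 4MB */
	unsigned long address  = vm_start + 0x400000;	/* fault 4MB into the mapping */

	/* Same expression as the patch: huge-page index into the file. */
	unsigned long idx = ((address - vm_start) >> HPAGE_SHIFT)
			    + (vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	printf("idx = %lu\n", idx);	/* 2 (within mapping) + 2 (pgoff) = 4 */
	return 0;
}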