Commit 82b0f8c3 authored by Jan Kara, committed by Linus Torvalds

mm: join struct fault_env and vm_fault

Currently we have two different structures for passing fault information
around - struct vm_fault and struct fault_env.  DAX will need more
information in struct vm_fault to handle its faults so the content of
that structure would become even closer to fault_env.  Furthermore it
would need to generate struct fault_env to be able to call some of the
generic functions.  So at this point I don't think there's much use in
keeping these two structures separate.  Just embed into struct vm_fault
all that is needed to use it for both purposes.

Link: http://lkml.kernel.org/r/1479460644-25076-2-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 8b7457ef
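For orientation, here is a condensed sketch of the merged structure as it stands after this patch, assembled from the include/linux/mm.h hunk below. Field comments are trimmed and a few members (the DAX entry, the preallocated page table) are omitted, so read it as an illustration rather than the verbatim definition:

struct vm_fault {
        struct vm_area_struct *vma;     /* Target VMA (previously in fault_env) */
        unsigned int flags;             /* FAULT_FLAG_xxx flags */
        gfp_t gfp_mask;                 /* gfp mask to be used for allocations */
        pgoff_t pgoff;                  /* Logical page offset based on vma */
        unsigned long address;          /* Faulting virtual address (previously in fault_env) */
        void __user *virtual_address;   /* Faulting virtual address masked by PAGE_MASK */
        pmd_t *pmd;                     /* Pointer to pmd entry matching 'address' */

        struct page *cow_page;          /* Handler may choose to COW */
        struct page *page;              /* ->fault handlers should return a page here */

        /* The entries below are valid only while holding the ptl lock: */
        pte_t *pte;                     /* Pointer to pte entry matching 'address';
                                         * NULL if the page table hasn't been allocated */
        spinlock_t *ptl;                /* Page table lock protecting pte */
};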
...@@ -556,7 +556,7 @@ till "end_pgoff". ->map_pages() is called with page table locked and must ...@@ -556,7 +556,7 @@ till "end_pgoff". ->map_pages() is called with page table locked and must
not block. If it's not possible to reach a page without blocking, not block. If it's not possible to reach a page without blocking,
filesystem should skip it. Filesystem should use do_set_pte() to setup filesystem should skip it. Filesystem should use do_set_pte() to setup
page table entry. Pointer to entry associated with the page is passed in page table entry. Pointer to entry associated with the page is passed in
"pte" field in fault_env structure. Pointers to entries for other offsets "pte" field in vm_fault structure. Pointers to entries for other offsets
should be calculated relative to "pte". should be calculated relative to "pte".
->page_mkwrite() is called when a previously read-only pte is ->page_mkwrite() is called when a previously read-only pte is
......
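The "pte" convention described in the Documentation hunk above is easiest to see in filemap_map_pages() further down in this diff: the handler advances both the faulting address and the pte pointer relative to the last offset it handled, then hands the page to alloc_set_pte(). A minimal sketch of that pattern follows; it assumes a hypothetical filesystem, example_find_uptodate_page() is a stand-in rather than a real kernel helper, and locking, readahead and reference handling are elided.

static void example_map_pages(struct vm_fault *vmf,
                              pgoff_t start_pgoff, pgoff_t end_pgoff)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pgoff_t last_pgoff = start_pgoff;
        pgoff_t index;
        struct page *page;

        for (index = start_pgoff; index <= end_pgoff; index++) {
                /* ->map_pages() must not block: skip anything not ready */
                page = example_find_uptodate_page(mapping, index);
                if (!page)
                        continue;

                /* entries for other offsets are calculated relative to vmf->pte */
                vmf->address += (index - last_pgoff) << PAGE_SHIFT;
                if (vmf->pte)
                        vmf->pte += index - last_pgoff;
                last_pgoff = index;

                if (alloc_set_pte(vmf, NULL, page))
                        break;          /* e.g. a huge page is already mapped here */
                unlock_page(page);
        }
}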
...@@ -257,9 +257,9 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, ...@@ -257,9 +257,9 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
* fatal_signal_pending()s, and the mmap_sem must be released before * fatal_signal_pending()s, and the mmap_sem must be released before
* returning it. * returning it.
*/ */
int handle_userfault(struct fault_env *fe, unsigned long reason) int handle_userfault(struct vm_fault *vmf, unsigned long reason)
{ {
struct mm_struct *mm = fe->vma->vm_mm; struct mm_struct *mm = vmf->vma->vm_mm;
struct userfaultfd_ctx *ctx; struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue uwq; struct userfaultfd_wait_queue uwq;
int ret; int ret;
...@@ -268,7 +268,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) ...@@ -268,7 +268,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
ret = VM_FAULT_SIGBUS; ret = VM_FAULT_SIGBUS;
ctx = fe->vma->vm_userfaultfd_ctx.ctx; ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
if (!ctx) if (!ctx)
goto out; goto out;
...@@ -301,17 +301,18 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) ...@@ -301,17 +301,18 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
* without first stopping userland access to the memory. For * without first stopping userland access to the memory. For
* VM_UFFD_MISSING userfaults this is enough for now. * VM_UFFD_MISSING userfaults this is enough for now.
*/ */
if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) { if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
/* /*
* Validate the invariant that nowait must allow retry * Validate the invariant that nowait must allow retry
* to be sure not to return SIGBUS erroneously on * to be sure not to return SIGBUS erroneously on
* nowait invocations. * nowait invocations.
*/ */
BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT); BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM #ifdef CONFIG_DEBUG_VM
if (printk_ratelimit()) { if (printk_ratelimit()) {
printk(KERN_WARNING printk(KERN_WARNING
"FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags); "FAULT_FLAG_ALLOW_RETRY missing %x\n",
vmf->flags);
dump_stack(); dump_stack();
} }
#endif #endif
...@@ -323,7 +324,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) ...@@ -323,7 +324,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
* and wait. * and wait.
*/ */
ret = VM_FAULT_RETRY; ret = VM_FAULT_RETRY;
if (fe->flags & FAULT_FLAG_RETRY_NOWAIT) if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out; goto out;
/* take the reference before dropping the mmap_sem */ /* take the reference before dropping the mmap_sem */
...@@ -331,11 +332,11 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) ...@@ -331,11 +332,11 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
uwq.wq.private = current; uwq.wq.private = current;
uwq.msg = userfault_msg(fe->address, fe->flags, reason); uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
uwq.ctx = ctx; uwq.ctx = ctx;
return_to_userland = return_to_userland =
(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
spin_lock(&ctx->fault_pending_wqh.lock); spin_lock(&ctx->fault_pending_wqh.lock);
...@@ -353,7 +354,8 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) ...@@ -353,7 +354,8 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
TASK_KILLABLE); TASK_KILLABLE);
spin_unlock(&ctx->fault_pending_wqh.lock); spin_unlock(&ctx->fault_pending_wqh.lock);
must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason); must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
reason);
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
if (likely(must_wait && !ACCESS_ONCE(ctx->released) && if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
......
#ifndef _LINUX_HUGE_MM_H #ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H
extern int do_huge_pmd_anonymous_page(struct fault_env *fe); extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma); struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
pmd_t *pmd, pmd_t *pmd,
...@@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page) ...@@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page)
return 1; return 1;
} }
extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page; extern struct page *huge_zero_page;
...@@ -212,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, ...@@ -212,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
return NULL; return NULL;
} }
static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{ {
return 0; return 0;
} }
......
...@@ -292,10 +292,16 @@ extern pgprot_t protection_map[16]; ...@@ -292,10 +292,16 @@ extern pgprot_t protection_map[16];
* pgoff should be used in favour of virtual_address, if possible. * pgoff should be used in favour of virtual_address, if possible.
*/ */
struct vm_fault { struct vm_fault {
struct vm_area_struct *vma; /* Target VMA */
unsigned int flags; /* FAULT_FLAG_xxx flags */ unsigned int flags; /* FAULT_FLAG_xxx flags */
gfp_t gfp_mask; /* gfp mask to be used for allocations */ gfp_t gfp_mask; /* gfp mask to be used for allocations */
pgoff_t pgoff; /* Logical page offset based on vma */ pgoff_t pgoff; /* Logical page offset based on vma */
void __user *virtual_address; /* Faulting virtual address */ unsigned long address; /* Faulting virtual address */
void __user *virtual_address; /* Faulting virtual address masked by
* PAGE_MASK */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address'
*/
struct page *cow_page; /* Handler may choose to COW */ struct page *cow_page; /* Handler may choose to COW */
struct page *page; /* ->fault handlers should return a struct page *page; /* ->fault handlers should return a
...@@ -309,19 +315,7 @@ struct vm_fault { ...@@ -309,19 +315,7 @@ struct vm_fault {
* VM_FAULT_DAX_LOCKED and fill in * VM_FAULT_DAX_LOCKED and fill in
* entry here. * entry here.
*/ */
}; /* These three entries are valid only while holding ptl lock */
/*
* Page fault context: passes though page fault handler instead of endless list
* of function arguments.
*/
struct fault_env {
struct vm_area_struct *vma; /* Target VMA */
unsigned long address; /* Faulting virtual address */
unsigned int flags; /* FAULT_FLAG_xxx flags */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address'
*/
pte_t *pte; /* Pointer to pte entry matching pte_t *pte; /* Pointer to pte entry matching
* the 'address'. NULL if the page * the 'address'. NULL if the page
* table hasn't been allocated. * table hasn't been allocated.
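A practical consequence of the merge, visible in the khugepaged hunk further down, is that callers which used to build a struct fault_env on the stack now fill a struct vm_fault directly. Roughly (only the fields visible in that hunk are shown; anything else is set as the caller needs it):

        struct vm_fault vmf = {
                .vma     = vma,
                .address = address,
                .flags   = FAULT_FLAG_ALLOW_RETRY,
                /* pmd, pgoff, ... filled in as required by the callee */
        };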
...@@ -351,7 +345,7 @@ struct vm_operations_struct { ...@@ -351,7 +345,7 @@ struct vm_operations_struct {
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
int (*pmd_fault)(struct vm_area_struct *, unsigned long address, int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
pmd_t *, unsigned int flags); pmd_t *, unsigned int flags);
void (*map_pages)(struct fault_env *fe, void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff); pgoff_t start_pgoff, pgoff_t end_pgoff);
/* notification that a previously read-only page is about to become /* notification that a previously read-only page is about to become
...@@ -625,7 +619,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) ...@@ -625,7 +619,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte; return pte;
} }
int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
struct page *page); struct page *page);
#endif #endif
...@@ -2094,7 +2088,7 @@ extern void truncate_inode_pages_final(struct address_space *); ...@@ -2094,7 +2088,7 @@ extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */ /* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern void filemap_map_pages(struct fault_env *fe, extern void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff); pgoff_t start_pgoff, pgoff_t end_pgoff);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
extern int handle_userfault(struct fault_env *fe, unsigned long reason); extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len); unsigned long src_start, unsigned long len);
...@@ -55,7 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) ...@@ -55,7 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
#else /* CONFIG_USERFAULTFD */ #else /* CONFIG_USERFAULTFD */
/* mm helpers */ /* mm helpers */
static inline int handle_userfault(struct fault_env *fe, unsigned long reason) static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason)
{ {
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
......
...@@ -2164,12 +2164,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -2164,12 +2164,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
} }
EXPORT_SYMBOL(filemap_fault); EXPORT_SYMBOL(filemap_fault);
void filemap_map_pages(struct fault_env *fe, void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff) pgoff_t start_pgoff, pgoff_t end_pgoff)
{ {
struct radix_tree_iter iter; struct radix_tree_iter iter;
void **slot; void **slot;
struct file *file = fe->vma->vm_file; struct file *file = vmf->vma->vm_file;
struct address_space *mapping = file->f_mapping; struct address_space *mapping = file->f_mapping;
pgoff_t last_pgoff = start_pgoff; pgoff_t last_pgoff = start_pgoff;
loff_t size; loff_t size;
...@@ -2225,11 +2225,11 @@ void filemap_map_pages(struct fault_env *fe, ...@@ -2225,11 +2225,11 @@ void filemap_map_pages(struct fault_env *fe,
if (file->f_ra.mmap_miss > 0) if (file->f_ra.mmap_miss > 0)
file->f_ra.mmap_miss--; file->f_ra.mmap_miss--;
fe->address += (iter.index - last_pgoff) << PAGE_SHIFT; vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
if (fe->pte) if (vmf->pte)
fe->pte += iter.index - last_pgoff; vmf->pte += iter.index - last_pgoff;
last_pgoff = iter.index; last_pgoff = iter.index;
if (alloc_set_pte(fe, NULL, page)) if (alloc_set_pte(vmf, NULL, page))
goto unlock; goto unlock;
unlock_page(page); unlock_page(page);
goto next; goto next;
...@@ -2239,7 +2239,7 @@ void filemap_map_pages(struct fault_env *fe, ...@@ -2239,7 +2239,7 @@ void filemap_map_pages(struct fault_env *fe,
put_page(page); put_page(page);
next: next:
/* Huge page is mapped? No need to proceed. */ /* Huge page is mapped? No need to proceed. */
if (pmd_trans_huge(*fe->pmd)) if (pmd_trans_huge(*vmf->pmd))
break; break;
if (iter.index == end_pgoff) if (iter.index == end_pgoff)
break; break;
......
...@@ -542,13 +542,13 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, ...@@ -542,13 +542,13 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
} }
EXPORT_SYMBOL_GPL(thp_get_unmapped_area); EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
gfp_t gfp) gfp_t gfp)
{ {
struct vm_area_struct *vma = fe->vma; struct vm_area_struct *vma = vmf->vma;
struct mem_cgroup *memcg; struct mem_cgroup *memcg;
pgtable_t pgtable; pgtable_t pgtable;
unsigned long haddr = fe->address & HPAGE_PMD_MASK; unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
VM_BUG_ON_PAGE(!PageCompound(page), page); VM_BUG_ON_PAGE(!PageCompound(page), page);
...@@ -573,9 +573,9 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, ...@@ -573,9 +573,9 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
*/ */
__SetPageUptodate(page); __SetPageUptodate(page);
fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_none(*fe->pmd))) { if (unlikely(!pmd_none(*vmf->pmd))) {
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
mem_cgroup_cancel_charge(page, memcg, true); mem_cgroup_cancel_charge(page, memcg, true);
put_page(page); put_page(page);
pte_free(vma->vm_mm, pgtable); pte_free(vma->vm_mm, pgtable);
...@@ -586,11 +586,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, ...@@ -586,11 +586,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
if (userfaultfd_missing(vma)) { if (userfaultfd_missing(vma)) {
int ret; int ret;
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
mem_cgroup_cancel_charge(page, memcg, true); mem_cgroup_cancel_charge(page, memcg, true);
put_page(page); put_page(page);
pte_free(vma->vm_mm, pgtable); pte_free(vma->vm_mm, pgtable);
ret = handle_userfault(fe, VM_UFFD_MISSING); ret = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret & VM_FAULT_FALLBACK); VM_BUG_ON(ret & VM_FAULT_FALLBACK);
return ret; return ret;
} }
...@@ -600,11 +600,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, ...@@ -600,11 +600,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
page_add_new_anon_rmap(page, vma, haddr, true); page_add_new_anon_rmap(page, vma, haddr, true);
mem_cgroup_commit_charge(page, memcg, false, true); mem_cgroup_commit_charge(page, memcg, false, true);
lru_cache_add_active_or_unevictable(page, vma); lru_cache_add_active_or_unevictable(page, vma);
pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, pgtable); pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
atomic_long_inc(&vma->vm_mm->nr_ptes); atomic_long_inc(&vma->vm_mm->nr_ptes);
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
count_vm_event(THP_FAULT_ALLOC); count_vm_event(THP_FAULT_ALLOC);
} }
...@@ -651,12 +651,12 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, ...@@ -651,12 +651,12 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
return true; return true;
} }
int do_huge_pmd_anonymous_page(struct fault_env *fe) int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{ {
struct vm_area_struct *vma = fe->vma; struct vm_area_struct *vma = vmf->vma;
gfp_t gfp; gfp_t gfp;
struct page *page; struct page *page;
unsigned long haddr = fe->address & HPAGE_PMD_MASK; unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
return VM_FAULT_FALLBACK; return VM_FAULT_FALLBACK;
...@@ -664,7 +664,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe) ...@@ -664,7 +664,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
return VM_FAULT_OOM; return VM_FAULT_OOM;
if (unlikely(khugepaged_enter(vma, vma->vm_flags))) if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
return VM_FAULT_OOM; return VM_FAULT_OOM;
if (!(fe->flags & FAULT_FLAG_WRITE) && if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm) && !mm_forbids_zeropage(vma->vm_mm) &&
transparent_hugepage_use_zero_page()) { transparent_hugepage_use_zero_page()) {
pgtable_t pgtable; pgtable_t pgtable;
...@@ -680,22 +680,22 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe) ...@@ -680,22 +680,22 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
count_vm_event(THP_FAULT_FALLBACK); count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK; return VM_FAULT_FALLBACK;
} }
fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
ret = 0; ret = 0;
set = false; set = false;
if (pmd_none(*fe->pmd)) { if (pmd_none(*vmf->pmd)) {
if (userfaultfd_missing(vma)) { if (userfaultfd_missing(vma)) {
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
ret = handle_userfault(fe, VM_UFFD_MISSING); ret = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret & VM_FAULT_FALLBACK); VM_BUG_ON(ret & VM_FAULT_FALLBACK);
} else { } else {
set_huge_zero_page(pgtable, vma->vm_mm, vma, set_huge_zero_page(pgtable, vma->vm_mm, vma,
haddr, fe->pmd, zero_page); haddr, vmf->pmd, zero_page);
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
set = true; set = true;
} }
} else } else
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
if (!set) if (!set)
pte_free(vma->vm_mm, pgtable); pte_free(vma->vm_mm, pgtable);
return ret; return ret;
...@@ -707,7 +707,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe) ...@@ -707,7 +707,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
return VM_FAULT_FALLBACK; return VM_FAULT_FALLBACK;
} }
prep_transhuge_page(page); prep_transhuge_page(page);
return __do_huge_pmd_anonymous_page(fe, page, gfp); return __do_huge_pmd_anonymous_page(vmf, page, gfp);
} }
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
...@@ -879,30 +879,30 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, ...@@ -879,30 +879,30 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
return ret; return ret;
} }
void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd) void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{ {
pmd_t entry; pmd_t entry;
unsigned long haddr; unsigned long haddr;
fe->ptl = pmd_lock(fe->vma->vm_mm, fe->pmd); vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
goto unlock; goto unlock;
entry = pmd_mkyoung(orig_pmd); entry = pmd_mkyoung(orig_pmd);
haddr = fe->address & HPAGE_PMD_MASK; haddr = vmf->address & HPAGE_PMD_MASK;
if (pmdp_set_access_flags(fe->vma, haddr, fe->pmd, entry, if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry,
fe->flags & FAULT_FLAG_WRITE)) vmf->flags & FAULT_FLAG_WRITE))
update_mmu_cache_pmd(fe->vma, fe->address, fe->pmd); update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
unlock: unlock:
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
} }
static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
struct page *page) struct page *page)
{ {
struct vm_area_struct *vma = fe->vma; struct vm_area_struct *vma = vmf->vma;
unsigned long haddr = fe->address & HPAGE_PMD_MASK; unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
struct mem_cgroup *memcg; struct mem_cgroup *memcg;
pgtable_t pgtable; pgtable_t pgtable;
pmd_t _pmd; pmd_t _pmd;
...@@ -921,7 +921,7 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, ...@@ -921,7 +921,7 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
for (i = 0; i < HPAGE_PMD_NR; i++) { for (i = 0; i < HPAGE_PMD_NR; i++) {
pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
__GFP_OTHER_NODE, vma, __GFP_OTHER_NODE, vma,
fe->address, page_to_nid(page)); vmf->address, page_to_nid(page));
if (unlikely(!pages[i] || if (unlikely(!pages[i] ||
mem_cgroup_try_charge(pages[i], vma->vm_mm, mem_cgroup_try_charge(pages[i], vma->vm_mm,
GFP_KERNEL, &memcg, false))) { GFP_KERNEL, &memcg, false))) {
...@@ -952,15 +952,15 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, ...@@ -952,15 +952,15 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
mmun_end = haddr + HPAGE_PMD_SIZE; mmun_end = haddr + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
goto out_free_pages; goto out_free_pages;
VM_BUG_ON_PAGE(!PageHead(page), page); VM_BUG_ON_PAGE(!PageHead(page), page);
pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
/* leave pmd empty until pte is filled */ /* leave pmd empty until pte is filled */
pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, fe->pmd); pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
pmd_populate(vma->vm_mm, &_pmd, pgtable); pmd_populate(vma->vm_mm, &_pmd, pgtable);
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
...@@ -969,20 +969,20 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, ...@@ -969,20 +969,20 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
entry = maybe_mkwrite(pte_mkdirty(entry), vma); entry = maybe_mkwrite(pte_mkdirty(entry), vma);
memcg = (void *)page_private(pages[i]); memcg = (void *)page_private(pages[i]);
set_page_private(pages[i], 0); set_page_private(pages[i], 0);
page_add_new_anon_rmap(pages[i], fe->vma, haddr, false); page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
mem_cgroup_commit_charge(pages[i], memcg, false, false); mem_cgroup_commit_charge(pages[i], memcg, false, false);
lru_cache_add_active_or_unevictable(pages[i], vma); lru_cache_add_active_or_unevictable(pages[i], vma);
fe->pte = pte_offset_map(&_pmd, haddr); vmf->pte = pte_offset_map(&_pmd, haddr);
VM_BUG_ON(!pte_none(*fe->pte)); VM_BUG_ON(!pte_none(*vmf->pte));
set_pte_at(vma->vm_mm, haddr, fe->pte, entry); set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
pte_unmap(fe->pte); pte_unmap(vmf->pte);
} }
kfree(pages); kfree(pages);
smp_wmb(); /* make pte visible before pmd */ smp_wmb(); /* make pte visible before pmd */
pmd_populate(vma->vm_mm, fe->pmd, pgtable); pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
page_remove_rmap(page, true); page_remove_rmap(page, true);
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
...@@ -993,7 +993,7 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, ...@@ -993,7 +993,7 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
return ret; return ret;
out_free_pages: out_free_pages:
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
for (i = 0; i < HPAGE_PMD_NR; i++) { for (i = 0; i < HPAGE_PMD_NR; i++) {
memcg = (void *)page_private(pages[i]); memcg = (void *)page_private(pages[i]);
...@@ -1005,23 +1005,23 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, ...@@ -1005,23 +1005,23 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
goto out; goto out;
} }
int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
{ {
struct vm_area_struct *vma = fe->vma; struct vm_area_struct *vma = vmf->vma;
struct page *page = NULL, *new_page; struct page *page = NULL, *new_page;
struct mem_cgroup *memcg; struct mem_cgroup *memcg;
unsigned long haddr = fe->address & HPAGE_PMD_MASK; unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
unsigned long mmun_start; /* For mmu_notifiers */ unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */ unsigned long mmun_end; /* For mmu_notifiers */
gfp_t huge_gfp; /* for allocation and charge */ gfp_t huge_gfp; /* for allocation and charge */
int ret = 0; int ret = 0;
fe->ptl = pmd_lockptr(vma->vm_mm, fe->pmd); vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
VM_BUG_ON_VMA(!vma->anon_vma, vma); VM_BUG_ON_VMA(!vma->anon_vma, vma);
if (is_huge_zero_pmd(orig_pmd)) if (is_huge_zero_pmd(orig_pmd))
goto alloc; goto alloc;
spin_lock(fe->ptl); spin_lock(vmf->ptl);
if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
goto out_unlock; goto out_unlock;
page = pmd_page(orig_pmd); page = pmd_page(orig_pmd);
...@@ -1034,13 +1034,13 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) ...@@ -1034,13 +1034,13 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
pmd_t entry; pmd_t entry;
entry = pmd_mkyoung(orig_pmd); entry = pmd_mkyoung(orig_pmd);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
if (pmdp_set_access_flags(vma, haddr, fe->pmd, entry, 1)) if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
update_mmu_cache_pmd(vma, fe->address, fe->pmd); update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
ret |= VM_FAULT_WRITE; ret |= VM_FAULT_WRITE;
goto out_unlock; goto out_unlock;
} }
get_page(page); get_page(page);
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
alloc: alloc:
if (transparent_hugepage_enabled(vma) && if (transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow()) { !transparent_hugepage_debug_cow()) {
...@@ -1053,12 +1053,12 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) ...@@ -1053,12 +1053,12 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
prep_transhuge_page(new_page); prep_transhuge_page(new_page);
} else { } else {
if (!page) { if (!page) {
split_huge_pmd(vma, fe->pmd, fe->address); split_huge_pmd(vma, vmf->pmd, vmf->address);
ret |= VM_FAULT_FALLBACK; ret |= VM_FAULT_FALLBACK;
} else { } else {
ret = do_huge_pmd_wp_page_fallback(fe, orig_pmd, page); ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
if (ret & VM_FAULT_OOM) { if (ret & VM_FAULT_OOM) {
split_huge_pmd(vma, fe->pmd, fe->address); split_huge_pmd(vma, vmf->pmd, vmf->address);
ret |= VM_FAULT_FALLBACK; ret |= VM_FAULT_FALLBACK;
} }
put_page(page); put_page(page);
...@@ -1070,7 +1070,7 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) ...@@ -1070,7 +1070,7 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
huge_gfp, &memcg, true))) { huge_gfp, &memcg, true))) {
put_page(new_page); put_page(new_page);
split_huge_pmd(vma, fe->pmd, fe->address); split_huge_pmd(vma, vmf->pmd, vmf->address);
if (page) if (page)
put_page(page); put_page(page);
ret |= VM_FAULT_FALLBACK; ret |= VM_FAULT_FALLBACK;
...@@ -1090,11 +1090,11 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) ...@@ -1090,11 +1090,11 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
mmun_end = haddr + HPAGE_PMD_SIZE; mmun_end = haddr + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
spin_lock(fe->ptl); spin_lock(vmf->ptl);
if (page) if (page)
put_page(page); put_page(page);
if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) { if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
mem_cgroup_cancel_charge(new_page, memcg, true); mem_cgroup_cancel_charge(new_page, memcg, true);
put_page(new_page); put_page(new_page);
goto out_mn; goto out_mn;
...@@ -1102,12 +1102,12 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) ...@@ -1102,12 +1102,12 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
pmd_t entry; pmd_t entry;
entry = mk_huge_pmd(new_page, vma->vm_page_prot); entry = mk_huge_pmd(new_page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
page_add_new_anon_rmap(new_page, vma, haddr, true); page_add_new_anon_rmap(new_page, vma, haddr, true);
mem_cgroup_commit_charge(new_page, memcg, false, true); mem_cgroup_commit_charge(new_page, memcg, false, true);
lru_cache_add_active_or_unevictable(new_page, vma); lru_cache_add_active_or_unevictable(new_page, vma);
set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
update_mmu_cache_pmd(vma, fe->address, fe->pmd); update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
if (!page) { if (!page) {
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
} else { } else {
...@@ -1117,13 +1117,13 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) ...@@ -1117,13 +1117,13 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
} }
ret |= VM_FAULT_WRITE; ret |= VM_FAULT_WRITE;
} }
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
out_mn: out_mn:
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
out: out:
return ret; return ret;
out_unlock: out_unlock:
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
return ret; return ret;
} }
...@@ -1196,12 +1196,12 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, ...@@ -1196,12 +1196,12 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
} }
/* NUMA hinting page fault entry point for trans huge pmds */ /* NUMA hinting page fault entry point for trans huge pmds */
int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
{ {
struct vm_area_struct *vma = fe->vma; struct vm_area_struct *vma = vmf->vma;
struct anon_vma *anon_vma = NULL; struct anon_vma *anon_vma = NULL;
struct page *page; struct page *page;
unsigned long haddr = fe->address & HPAGE_PMD_MASK; unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int page_nid = -1, this_nid = numa_node_id(); int page_nid = -1, this_nid = numa_node_id();
int target_nid, last_cpupid = -1; int target_nid, last_cpupid = -1;
bool page_locked; bool page_locked;
...@@ -1209,8 +1209,8 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) ...@@ -1209,8 +1209,8 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
bool was_writable; bool was_writable;
int flags = 0; int flags = 0;
fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(pmd, *fe->pmd))) if (unlikely(!pmd_same(pmd, *vmf->pmd)))
goto out_unlock; goto out_unlock;
/* /*
...@@ -1218,9 +1218,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) ...@@ -1218,9 +1218,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
* without disrupting NUMA hinting information. Do not relock and * without disrupting NUMA hinting information. Do not relock and
* check_same as the page may no longer be mapped. * check_same as the page may no longer be mapped.
*/ */
if (unlikely(pmd_trans_migrating(*fe->pmd))) { if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
page = pmd_page(*fe->pmd); page = pmd_page(*vmf->pmd);
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
wait_on_page_locked(page); wait_on_page_locked(page);
goto out; goto out;
} }
...@@ -1253,7 +1253,7 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) ...@@ -1253,7 +1253,7 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
/* Migration could have started since the pmd_trans_migrating check */ /* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) { if (!page_locked) {
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
wait_on_page_locked(page); wait_on_page_locked(page);
page_nid = -1; page_nid = -1;
goto out; goto out;
...@@ -1264,12 +1264,12 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) ...@@ -1264,12 +1264,12 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
* to serialises splits * to serialises splits
*/ */
get_page(page); get_page(page);
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
anon_vma = page_lock_anon_vma_read(page); anon_vma = page_lock_anon_vma_read(page);
/* Confirm the PMD did not change while page_table_lock was released */ /* Confirm the PMD did not change while page_table_lock was released */
spin_lock(fe->ptl); spin_lock(vmf->ptl);
if (unlikely(!pmd_same(pmd, *fe->pmd))) { if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
unlock_page(page); unlock_page(page);
put_page(page); put_page(page);
page_nid = -1; page_nid = -1;
...@@ -1287,9 +1287,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) ...@@ -1287,9 +1287,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
* Migrate the THP to the requested node, returns with page unlocked * Migrate the THP to the requested node, returns with page unlocked
* and access rights restored. * and access rights restored.
*/ */
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
fe->pmd, pmd, fe->address, page, target_nid); vmf->pmd, pmd, vmf->address, page, target_nid);
if (migrated) { if (migrated) {
flags |= TNF_MIGRATED; flags |= TNF_MIGRATED;
page_nid = target_nid; page_nid = target_nid;
...@@ -1304,18 +1304,19 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) ...@@ -1304,18 +1304,19 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
pmd = pmd_mkyoung(pmd); pmd = pmd_mkyoung(pmd);
if (was_writable) if (was_writable)
pmd = pmd_mkwrite(pmd); pmd = pmd_mkwrite(pmd);
set_pmd_at(vma->vm_mm, haddr, fe->pmd, pmd); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
update_mmu_cache_pmd(vma, fe->address, fe->pmd); update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
unlock_page(page); unlock_page(page);
out_unlock: out_unlock:
spin_unlock(fe->ptl); spin_unlock(vmf->ptl);
out: out:
if (anon_vma) if (anon_vma)
page_unlock_anon_vma_read(anon_vma); page_unlock_anon_vma_read(anon_vma);
if (page_nid != -1) if (page_nid != -1)
task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, fe->flags); task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
vmf->flags);
return 0; return 0;
} }
......
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
/* Do not use these with a slab allocator */ /* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
int do_swap_page(struct fault_env *fe, pte_t orig_pte); int do_swap_page(struct vm_fault *vmf, pte_t orig_pte);
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling); unsigned long floor, unsigned long ceiling);
......
...@@ -877,7 +877,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, ...@@ -877,7 +877,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
{ {
pte_t pteval; pte_t pteval;
int swapped_in = 0, ret = 0; int swapped_in = 0, ret = 0;
struct fault_env fe = { struct vm_fault vmf = {
.vma = vma, .vma = vma,
.address = address, .address = address,
.flags = FAULT_FLAG_ALLOW_RETRY, .flags = FAULT_FLAG_ALLOW_RETRY,
...@@ -889,19 +889,19 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, ...@@ -889,19 +889,19 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
return false; return false;
} }
fe.pte = pte_offset_map(pmd, address); vmf.pte = pte_offset_map(pmd, address);
for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
fe.pte++, fe.address += PAGE_SIZE) { vmf.pte++, vmf.address += PAGE_SIZE) {
pteval = *fe.pte; pteval = *vmf.pte;
if (!is_swap_pte(pteval)) if (!is_swap_pte(pteval))
continue; continue;
swapped_in++; swapped_in++;
ret = do_swap_page(&fe, pteval); ret = do_swap_page(&vmf, pteval);
/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
if (ret & VM_FAULT_RETRY) { if (ret & VM_FAULT_RETRY) {
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
if (hugepage_vma_revalidate(mm, address, &fe.vma)) { if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
/* vma is no longer available, don't continue to swapin */ /* vma is no longer available, don't continue to swapin */
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
return false; return false;
...@@ -915,10 +915,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, ...@@ -915,10 +915,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
return false; return false;
} }
/* pte is unmapped now, we need to map it */ /* pte is unmapped now, we need to map it */
fe.pte = pte_offset_map(pmd, fe.address); vmf.pte = pte_offset_map(pmd, vmf.address);
} }
fe.pte--; vmf.pte--;
pte_unmap(fe.pte); pte_unmap(vmf.pte);
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
return true; return true;
} }
......
This diff is collapsed.
...@@ -1801,7 +1801,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1801,7 +1801,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
} }
EXPORT_SYMBOL(filemap_fault); EXPORT_SYMBOL(filemap_fault);
void filemap_map_pages(struct fault_env *fe, void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff) pgoff_t start_pgoff, pgoff_t end_pgoff)
{ {
BUG(); BUG();
......