Commit dcddffd4 authored by Kirill A. Shutemov, committed by Linus Torvalds
Parent 6fb8ddfc
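This commit drops the struct mm_struct * argument from handle_mm_fault(): the mm a fault acts on is always reachable as vma->vm_mm, so passing it separately was redundant and allowed a mismatched mm/vma pair in principle. Every caller in the architecture fault handlers, the IOMMU drivers, and mm/ is converted below. Seen from a caller, the change is the following minimal before/after sketch (distilled from the hunks below; mm, vma, address and flags are the locals the callers already hold):

	/* before: the mm was passed alongside the vma it belongs to */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* after: the vma alone is enough; the callee uses vma->vm_mm */
	fault = handle_mm_fault(vma, address, flags);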
@@ -147,7 +147,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -137,7 +137,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
 	if (unlikely(fatal_signal_pending(current))) {
...
@@ -243,7 +243,7 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 		goto out;
 	}

-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, flags);

 check_stack:
 	/* Don't allow expansion below FIRST_USER_ADDRESS */
...
@@ -233,7 +233,7 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 		goto out;
 	}

-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);

 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
...
@@ -134,7 +134,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -168,7 +168,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -164,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, ear0, flags);
+	fault = handle_mm_fault(vma, ear0, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
...
@@ -101,7 +101,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 		break;
 	}

-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);

 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -159,7 +159,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -196,7 +196,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, flags);
+	fault = handle_mm_fault(vma, addr, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
...
@@ -136,7 +136,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	pr_debug("handle_mm_fault returns %d\n", fault);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
...
@@ -133,7 +133,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
...
@@ -216,7 +216,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -153,7 +153,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -254,7 +254,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -131,7 +131,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -239,7 +239,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
	 * fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -75,7 +75,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	}

 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
+	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
...
@@ -429,7 +429,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
 		if (fault & VM_FAULT_SIGSEGV)
 			goto bad_area;
...
@@ -456,7 +456,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	/* No reason to continue if interrupted by SIGKILL. */
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
 		fault = VM_FAULT_SIGNAL;
...
@@ -111,7 +111,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
...
@@ -487,7 +487,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);

 	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
 		if (mm_fault_error(regs, error_code, address, fault))
...
@@ -241,7 +241,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -411,7 +411,7 @@ static void force_user_fault(unsigned long address, int write)
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, flags)) {
+	switch (handle_mm_fault(vma, address, flags)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
...
@@ -436,7 +436,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			goto bad_area;
 	}

-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);

 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		goto exit_exception;
...
@@ -434,7 +434,7 @@ static int handle_page_fault(struct pt_regs *regs,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
...
@@ -73,7 +73,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	do {
 		int fault;

-		fault = handle_mm_fault(mm, vma, address, flags);
+		fault = handle_mm_fault(vma, address, flags);

 		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 			goto out_nosemaphore;
...
@@ -194,7 +194,7 @@ static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the fault.
	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);
 	return fault;

 check_stack:
...
@@ -1353,7 +1353,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;

 	/*
...
@@ -110,7 +110,7 @@ void do_page_fault(struct pt_regs *regs)
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
...
@@ -538,8 +538,7 @@ static void do_fault(struct work_struct *work)
 	if (access_error(vma, fault))
 		goto out;

-	ret = handle_mm_fault(mm, vma, address, flags);
-
+	ret = handle_mm_fault(vma, address, flags);
 out:
 	up_read(&mm->mmap_sem);
...
@@ -583,7 +583,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (access_error(vma, req))
 			goto invalid;

-		ret = handle_mm_fault(svm->mm, vma, address,
+		ret = handle_mm_fault(vma, address,
 				      req->wr_req ? FAULT_FLAG_WRITE : 0);
 		if (ret & VM_FAULT_ERROR)
 			goto invalid;
...
@@ -1215,15 +1215,14 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);

 #ifdef CONFIG_MMU
-extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags);
+extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			unsigned long address, unsigned int fault_flags,
 			bool *unlocked);
 #else
-static inline int handle_mm_fault(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long address,
-		unsigned int flags)
+static inline int handle_mm_fault(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();
...
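With the declaration change above, the MMU and no-MMU variants now share the same three-argument prototype. A minimal sketch of a caller written against the new signature (illustrative only: the helper name example_fault_in_page and its error mapping are not part of this patch, though the mapping mirrors what copro_handle_mm_fault() does above):

	#include <linux/mm.h>

	/* Illustrative helper: fault in a single page for write access. */
	static int example_fault_in_page(struct vm_area_struct *vma,
					 unsigned long address)
	{
		int fault = handle_mm_fault(vma, address, FAULT_FLAG_WRITE);

		if (unlikely(fault & VM_FAULT_ERROR))
			return (fault & VM_FAULT_OOM) ? -ENOMEM : -EFAULT;
		return 0;
	}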
@@ -352,7 +352,6 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		unsigned long address, unsigned int *flags, int *nonblocking)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	unsigned int fault_flags = 0;
 	int ret;
...
@@ -377,7 +376,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		fault_flags |= FAULT_FLAG_TRIED;
 	}

-	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	ret = handle_mm_fault(vma, address, fault_flags);
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
 			return -ENOMEM;
...
@@ -692,7 +691,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 	if (!vma_permits_fault(vma, fault_flags))
 		return -EFAULT;

-	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	ret = handle_mm_fault(vma, address, fault_flags);
 	major |= ret & VM_FAULT_MAJOR;
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
...
@@ -376,9 +376,8 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 		if (IS_ERR_OR_NULL(page))
 			break;
 		if (PageKsm(page))
-			ret = handle_mm_fault(vma->vm_mm, vma, addr,
-							FAULT_FLAG_WRITE |
-							FAULT_FLAG_REMOTE);
+			ret = handle_mm_fault(vma, addr,
+					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
 		else
 			ret = VM_FAULT_WRITE;
 		put_page(page);
...
@@ -3420,9 +3420,10 @@ static int handle_pte_fault(struct mm_struct *mm,
  * The mmap_sem may have been released depending on flags and our
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
-static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
...
@@ -3509,15 +3510,15 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * The mmap_sem may have been released depending on flags and our
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags)
+int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
 	int ret;

 	__set_current_state(TASK_RUNNING);

 	count_vm_event(PGFAULT);
-	mem_cgroup_count_vm_event(mm, PGFAULT);
+	mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);

 	/* do counter updates before entering really critical section. */
 	check_sync_rss_stat(current);
...
@@ -3529,7 +3530,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & FAULT_FLAG_USER)
 		mem_cgroup_oom_enable();

-	ret = __handle_mm_fault(mm, vma, address, flags);
+	ret = __handle_mm_fault(vma, address, flags);

 	if (flags & FAULT_FLAG_USER) {
 		mem_cgroup_oom_disable();
...
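The mm/memory.c hunks show where the dropped argument went: __handle_mm_fault() now opens by recovering it with struct mm_struct *mm = vma->vm_mm;, and handle_mm_fault() reads vma->vm_mm directly where it needs the mm, as in the memcg event accounting. Because every vma carries a back-pointer to its owning mm, a caller can no longer pass a mismatched mm/vma pair; the pairing is enforced by construction.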