diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4a905bd667e2ef71542e2585469404478860bce5..83e9eee57a55a165d57ffec0c6fe58c4b78300ea 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -147,7 +147,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index af63f4a13e605eda26bcb7de54595bda5ecad997..e94e5aa33985c540b279df6cd6237c896f78d77a 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -137,7 +137,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
 	if (unlikely(fatal_signal_pending(current))) {
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index ad5841856007a4ee0f46b6c4c40dcdfbcbf0aac1..3a2e678b8d30cabfb058fd82bb1d3336e3dab02d 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -243,7 +243,7 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, flags);
 
 check_stack:
 	/* Don't allow expansion below FIRST_USER_ADDRESS */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b1166d1e5955cd80096ea7412a2371008ec81b7c..031820d989a840afb142e91efa52ccc4e47ab3da 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -233,7 +233,7 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index c03533937a9f0aa273a75c76ecb66f2731b2d39c..a4b7edac8f109a3a9eed44c05e63b43ec437897e 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -134,7 +134,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 3066d40a6db14425c162d399d89e5c6db66786fe..112ef26c7f2e8346372f6fc3a50d543af610d95f 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -168,7 +168,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 61d99767fe1691e70286bf8d52a05aee506bbccc..614a46c413d237afdc3a18e03f258785cff9a254 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -164,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, flags);
+	fault = handle_mm_fault(vma, ear0, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 8704c9320032705cf7de10d6a94f3c4d70cf8b12..bd7c251e2bced3424c6ad74c5ec8e11c6e42dd9f 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -101,7 +101,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 		break;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 70b40d1205a6b9b3ec7efcbc9e60ec64c2eff712..fa6ad95e992e3690d623dd267e3c651b76f9bb9d 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -159,7 +159,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 8f9875b7933d5582277a777693a2e44c1483d362..a3785d3644c233f7f8e3ca6406cae54f3531ee11 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, flags);
+	fault = handle_mm_fault(vma, addr, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 6a94cdd0c8308cb70151d477872ccbeda2022ea8..bd66a0b20c6b79dd51142822e84db79051b1a3a9 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -136,7 +136,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	pr_debug("handle_mm_fault returns %d\n", fault);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index f57edca63609bf15f1a71bf5a90c8837d6a9fd13..372783a67dda1a457e4dafc9139324133a24d564 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -133,7 +133,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 177dfc0036436284d4e016b0987e4516faa7f445..abb678ccde6ff5ddc7829de582245a5aaa811293 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -216,7 +216,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 4b88fa031891d474c715db30479218e8216aa948..9560ad73112093a2b3c1573b4c54e5993e97328c 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -153,7 +153,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 4a1d181ed32f7690a82cfcba3eb264f9311b0a0f..f23781d6bbb3fed5c763249a52afc3858d2a12fe 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -254,7 +254,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index b51878b0c6b87362074c68832c1b4355f7c127cc..affc4eb3f89efa7e98bd9cccc3145520f158270e 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -131,7 +131,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 230ac20ae7944f71636e5083fdaf3f034eb10af2..e94cd225e8161c2aa5e5e63019dd15e5b89be114 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 16dbe81c97c9005df3cbb91045ccb6ed20878930..163af2c31d761ec46aec845217676be8ec8ae408 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -239,7 +239,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	 * fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 6527882ce05ede3a0a45f74d3a11c4c375da6514..bb0354222b1158c57133a09ef50b98cf20de6f29 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -75,7 +75,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	}
 
 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
+	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a67c6d781c52b9a28ac90b0c6cc1650519e9260c..a4db22f6502150a25a082769edc54ee58bf7211d 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -429,7 +429,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
 		if (fault & VM_FAULT_SIGSEGV)
 			goto bad_area;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 19288c1b36d32bfed0013e6a90385d8d4c82d08c..6c47488745ae4669939f996db7c673b43e0c5864 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -456,7 +456,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	/* No reason to continue if interrupted by SIGKILL. */
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
 		fault = VM_FAULT_SIGNAL;
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 37a6c2e0e96926f26902484969befaf860fc52fc..995b71e4db4bafe3100ca5f4d62c9c2aff2af136 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -111,7 +111,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 79d8276377d1e2f62e6c9231f735ef0f1c5ca22e..9bf876780cef4b1ea4b2aee99e47997ffb1f8b97 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -487,7 +487,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
 		if (mm_fault_error(regs, error_code, address, fault))
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b6c559cbd64da3348cb76aee8db6d47f01c9da67..4714061d6cd335d2258aebc22bb03c99a6c84317 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -411,7 +411,7 @@ static void force_user_fault(unsigned long address, int write)
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, flags)) {
+	switch (handle_mm_fault(vma, address, flags)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index cb841a33da59061d6f435cb8cb7c5f717284f817..6c43b924a7a2bae17cfd98b4b7de0be64b16ad13 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -436,7 +436,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		goto exit_exception;
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 26734214818c8860ba671c48e239b2241c9bd1f1..beba986589e5851aea236163a6b6f4031d6ed5a7 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -434,7 +434,7 @@ static int handle_page_fault(struct pt_regs *regs,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 98783dd0fa2ea697a50cb65afb661aa7f7bf0322..ad8f206ab5e8516c42c08fa0d7e15739e06f4484 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -73,7 +73,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	do {
 		int fault;
 
-		fault = handle_mm_fault(mm, vma, address, flags);
+		fault = handle_mm_fault(vma, address, flags);
 
 		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 			goto out_nosemaphore;
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 2ec3d3adcefc948870ab4419575aecab3f6f47e7..6c7f70bcaae3263b1b9a229c87739c1bd25b68e1 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -194,7 +194,7 @@ static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);
 	return fault;
 
 check_stack:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d22161ab941d51aee4ba4ec74d8858bc8b164c10..dc802306045653be7177fd619cdc3dffbcd92815 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1353,7 +1353,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;
 
 	/*
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 7f4a1fdb1502005aa23431a5f36d568882f0aacf..2725e08ef353798956d4378085d55ae00eeba303 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -110,7 +110,7 @@ void do_page_fault(struct pt_regs *regs)
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 56999d2fac070483dc72ce662e32567fb63ebb09..fbdaf81ae925608edf25295e490bbd50cf0d4281 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -538,8 +538,7 @@ static void do_fault(struct work_struct *work)
 	if (access_error(vma, fault))
 		goto out;
 
-	ret = handle_mm_fault(mm, vma, address, flags);
-
+	ret = handle_mm_fault(vma, address, flags);
 out:
 	up_read(&mm->mmap_sem);
 
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index d9939fa9b58887567d1fe8a7a94c7e5f1452cdf0..8ebb3530afa7574810f849e7a333c19473740304 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -583,7 +583,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (access_error(vma, req))
 			goto invalid;
 
-		ret = handle_mm_fault(svm->mm, vma, address,
+		ret = handle_mm_fault(vma, address,
 				      req->wr_req ? FAULT_FLAG_WRITE : 0);
 		if (ret & VM_FAULT_ERROR)
 			goto invalid;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6c9a394b29794bd20e3a60df49e36fae2e4f8c1d..646bc36b4d1b7e61bc74b346a42fc3a8fbbab31e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1215,15 +1215,14 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
-extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags);
+extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
 #else
-static inline int handle_mm_fault(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long address,
-		unsigned int flags)
+static inline int handle_mm_fault(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();
diff --git a/mm/gup.c b/mm/gup.c
index dee142e100f4cbf988a1bf7a8fe8d02501d724d6..9671e29f8ffd0e59ba5384a39e95b2834ffab133 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -352,7 +352,6 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		unsigned long address, unsigned int *flags, int *nonblocking)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	unsigned int fault_flags = 0;
 	int ret;
 
@@ -377,7 +376,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		fault_flags |= FAULT_FLAG_TRIED;
 	}
 
-	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	ret = handle_mm_fault(vma, address, fault_flags);
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
 			return -ENOMEM;
@@ -692,7 +691,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 	if (!vma_permits_fault(vma, fault_flags))
 		return -EFAULT;
 
-	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	ret = handle_mm_fault(vma, address, fault_flags);
 	major |= ret & VM_FAULT_MAJOR;
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
diff --git a/mm/ksm.c b/mm/ksm.c
index 35b8aef867a92ee79aff5cb074558f804a6fdc6a..73d43bafd9fbc41ad322b9d26e80e548481a23b4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,9 +376,8 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 		if (IS_ERR_OR_NULL(page))
 			break;
 		if (PageKsm(page))
-			ret = handle_mm_fault(vma->vm_mm, vma, addr,
-					FAULT_FLAG_WRITE |
-					FAULT_FLAG_REMOTE);
+			ret = handle_mm_fault(vma, addr,
+					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
 		else
 			ret = VM_FAULT_WRITE;
 		put_page(page);
diff --git a/mm/memory.c b/mm/memory.c
index 5e6eadd127e7bcb42905883377abe6dab49dec94..6bf2b8564376fd3819a8907c15dce91be6696b4a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3420,9 +3420,10 @@ static int handle_pte_fault(struct mm_struct *mm,
  * The mmap_sem may have been released depending on flags and our
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
-static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -3509,15 +3510,15 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * The mmap_sem may have been released depending on flags and our
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags)
+int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
 	int ret;
 
 	__set_current_state(TASK_RUNNING);
 
 	count_vm_event(PGFAULT);
-	mem_cgroup_count_vm_event(mm, PGFAULT);
+	mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);
 
 	/* do counter updates before entering really critical section. */
 	check_sync_rss_stat(current);
@@ -3529,7 +3530,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & FAULT_FLAG_USER)
 		mem_cgroup_oom_enable();
 
-	ret = __handle_mm_fault(mm, vma, address, flags);
+	ret = __handle_mm_fault(vma, address, flags);
 
 	if (flags & FAULT_FLAG_USER) {
 		mem_cgroup_oom_disable();
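---

The call-site conversion is purely mechanical: the mm argument is dropped
and handle_mm_fault() now derives it from vma->vm_mm, as the new
__handle_mm_fault() body above shows. A minimal before/after sketch of an
affected caller (hypothetical code, not taken from this patch; it assumes
mmap_sem is already held for read, as it is at every call site touched here):

	struct vm_area_struct *vma = find_vma(mm, address);
	int fault;

	/* old API: caller passed both the mm and the vma */
	fault = handle_mm_fault(mm, vma, address, FAULT_FLAG_WRITE);

	/* new API: the mm is redundant, taken from vma->vm_mm internally */
	fault = handle_mm_fault(vma, address, FAULT_FLAG_WRITE);

Since a vma always points back to its owning mm, the old two-argument form
only created an opportunity for the two to disagree; mm/ksm.c and mm/gup.c
above were already passing vma->vm_mm explicitly.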