Commit 43ca4957 authored by Kautuk Consul, committed by Ralf Baechle

MIPS: fault.c: Port OOM changes to do_page_fault

Commit d065bd81
(mm: retry page fault when blocking on disk transfer) and
commit 37b23e05
(x86,mm: make pagefault killable)
introduced changes into the x86 page fault handler to make it
retryable as well as killable.

These changes reduce the mmap_sem hold time, which is crucial
during OOM killer invocation.

Port these changes to MIPS.

Without these changes, my MIPS board runs into many hang and livelock
scenarios. With this patch applied, OOM handling behaves noticeably
better in my testing.
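
For readers unfamiliar with the retry/killable fault pattern, the sketch
below models the control flow this patch introduces as a small,
stand-alone user-space program. The flag values and the
handle_mm_fault_stub()/fatal_signal_pending_stub() helpers are
simplified, hypothetical stand-ins for the kernel API; the actual change
is the fault.c diff further down.

/*
 * Hypothetical user-space model of the retry/killable fault flow.
 * The *_stub names and flag values are illustrative stand-ins only.
 */
#include <stdio.h>
#include <stdbool.h>

#define FAULT_FLAG_ALLOW_RETRY 0x01
#define FAULT_FLAG_KILLABLE    0x02
#define FAULT_FLAG_WRITE       0x04

#define VM_FAULT_RETRY         0x10
#define VM_FAULT_MAJOR         0x20

/* Pretend the first attempt has to block on I/O and asks for a retry. */
static int handle_mm_fault_stub(unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY)
		return VM_FAULT_RETRY;	/* mmap_sem would be dropped here */
	return VM_FAULT_MAJOR;		/* second attempt completes */
}

static bool fatal_signal_pending_stub(void)
{
	return false;			/* flip to true to model SIGKILL */
}

int main(void)
{
	int write = 1;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (write ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	/* down_read(&mm->mmap_sem) would happen here */
	fault = handle_mm_fault_stub(flags);

	/*
	 * Killable: if a retry was requested and a fatal signal is pending,
	 * give up immediately; the semaphore has already been released.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending_stub()) {
		printf("fatal signal pending, aborting fault\n");
		return 0;
	}

	if (fault & VM_FAULT_RETRY) {
		/* Retry exactly once, never sleeping on I/O with the lock held. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		printf("retrying fault without ALLOW_RETRY\n");
		goto retry;
	}

	/* up_read(&mm->mmap_sem) would happen here */
	printf("fault handled (%s)\n",
	       (fault & VM_FAULT_MAJOR) ? "major" : "minor");
	return 0;
}

The key point, mirrored from the x86 version, is that the fault path
drops mmap_sem itself before blocking when it returns VM_FAULT_RETRY, so
the handler can either bail out on a fatal signal or loop back and retry
at most once, without holding the semaphore across disk I/O.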
Signed-off-by: Mohd. Faris <mohdfarisq2010@gmail.com>
Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/3217/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Parent 58a7fca6
@@ -42,6 +42,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 	const int field = sizeof(unsigned long) * 2;
 	siginfo_t info;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+						 (write ? FAULT_FLAG_WRITE : 0);
 
 #if 0
 	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -91,6 +93,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -144,7 +147,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
@@ -153,13 +160,28 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-		tsk->maj_flt++;
-	} else {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-		tsk->min_flt++;
-	}
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+						  regs, address);
+			tsk->maj_flt++;
+		} else {
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+						  regs, address);
+			tsk->min_flt++;
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
 
 	up_read(&mm->mmap_sem);
 	return;