Commit 1a8d05a7 authored by Linus Torvalds

Merge tag 'pull-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull VM_FAULT_RETRY fixes from Al Viro:
 "Some of the page fault handlers do not deal with the following case
  correctly:

   - handle_mm_fault() has returned VM_FAULT_RETRY

   - there is a pending fatal signal

   - the fault happened in kernel mode

  The correct action in such a case is not to "return unconditionally" -
  fatal signals are handled only upon return to userland, so something
  like copy_to_user() would end up retrying the faulting instruction and
  triggering the same fault again and again.

  What we need to do in such a case is to make the caller treat it as a
  failed uaccess attempt - handle the exception if there is an exception
  handler for the faulting instruction, or oops if there isn't one.

  Over the years some architectures have been fixed and now handle that
  case properly; some still do not. This series should fix the remaining
  ones.

  Status:

   - m68k, riscv, hexagon, parisc: tested/acked by maintainers.

   - alpha, sparc32, sparc64: tested locally - bug has been reproduced
     on the unpatched kernel and verified to be fixed by this series.

   - ia64, microblaze, nios2, openrisc: build, but otherwise completely
     untested"

* tag 'pull-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  openrisc: fix livelock in uaccess
  nios2: fix livelock in uaccess
  microblaze: fix livelock in uaccess
  ia64: fix livelock in uaccess
  sparc: fix livelock in uaccess
  alpha: fix livelock in uaccess
  parisc: fix livelock in uaccess
  hexagon: fix livelock in uaccess
  riscv: fix livelock in uaccess
  m68k: fix livelock in uaccess

--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -152,8 +152,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	   the fault. */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -93,8 +93,11 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -136,8 +136,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -138,8 +138,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	fault = handle_mm_fault(vma, address, flags, regs);
 	pr_debug("handle_mm_fault returns %x\n", fault);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return 0;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -219,8 +219,11 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			bad_page_fault(regs, address, SIGBUS);
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -136,8 +136,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -162,8 +162,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -308,8 +308,13 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs)) {
+			msg = "Page fault: fault signal on kernel memory";
+			goto no_context;
+		}
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -326,8 +326,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * signal first. We do not need to release the mmap_lock because it
 	 * would already be released in __lock_page_or_retry in mm/filemap.c.
 	 */
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			no_context(regs, addr);
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -187,8 +187,11 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!from_user)
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)

--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -424,8 +424,13 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (regs->tstate & TSTATE_PRIV) {
+			insn = get_fault_insn(regs, insn);
+			goto handle_kernel_fault;
+		}
 		goto exit_exception;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)