Commit b344e24a authored by Matt Fleming

sh: unwinder: Introduce UNWINDER_BUG() and UNWINDER_BUG_ON()

We can't assume that the unwinder has faulted just because it was
already running when the unwinder code is entered. Two kernel threads
can legitimately invoke the unwinder at the same time and run it
simultaneously.

The previous approach used BUG() and BUG_ON() in the unwinder code to
signal that the unwinder was incapable of unwinding the stack and that
the next available unwinder should be used instead. A better approach
is to explicitly invoke a trap handler to switch unwinders when the
current unwinder cannot continue.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Parent 97efbbd5
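
For context, here is a minimal sketch of the intended usage pattern. The helper below is hypothetical and not part of this patch; it only illustrates that sanity checks inside unwinder code should use UNWINDER_BUG_ON(), which emits trapa #0x3b so that a failure makes unwind_stack() fall back to the next-best unwinder, while the rest of the kernel keeps using BUG()/BUG_ON() and trapa #0x3e:

/*
 * Hypothetical example, not part of this patch: a sanity check inside
 * unwinder code raises the unwinder trap rather than a plain BUG(),
 * so the kernel can downgrade to a simpler unwinder instead of dying.
 */
static unsigned long read_saved_reg(struct dwarf_frame *frame,
                                    unsigned int num)
{
        struct dwarf_reg *reg;

        reg = dwarf_frame_reg(frame, num);

        /* Faulting here flags unwinder_faulted via the trapa #0x3b handler. */
        UNWINDER_BUG_ON(!reg);

        return __raw_readl(frame->cfa + reg->addr);
}
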
#ifndef __ASM_SH_BUG_H
#define __ASM_SH_BUG_H
#define TRAPA_UNWINDER_BUG_OPCODE 0xc33b /* trapa #0x3b */
#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
#ifdef CONFIG_GENERIC_BUG
......@@ -72,6 +73,30 @@ do { \
unlikely(__ret_warn_on); \
})
#define UNWINDER_BUG() \
do { \
__asm__ __volatile__ ( \
"1:\t.short %O0\n" \
_EMIT_BUG_ENTRY \
: \
: "n" (TRAPA_UNWINDER_BUG_OPCODE), \
"i" (__FILE__), \
"i" (__LINE__), "i" (0), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#define UNWINDER_BUG_ON(x) ({ \
int __ret_unwinder_on = !!(x); \
if (__builtin_constant_p(__ret_unwinder_on)) { \
if (__ret_unwinder_on) \
UNWINDER_BUG(); \
} else { \
if (unlikely(__ret_unwinder_on)) \
UNWINDER_BUG(); \
} \
unlikely(__ret_unwinder_on); \
})
#endif /* CONFIG_GENERIC_BUG */
#include <asm-generic/bug.h>
......
......@@ -181,6 +181,11 @@ BUILD_TRAP_HANDLER(breakpoint);
BUILD_TRAP_HANDLER(singlestep);
BUILD_TRAP_HANDLER(fpu_error);
BUILD_TRAP_HANDLER(fpu_state_restore);
BUILD_TRAP_HANDLER(unwinder);
#ifdef CONFIG_BUG
extern void handle_BUG(struct pt_regs *);
#endif
#define arch_align_stack(x) (x)
......
......@@ -22,4 +22,10 @@ extern void stack_reader_dump(struct task_struct *, struct pt_regs *,
unsigned long *, const struct stacktrace_ops *,
void *);
/*
* Used by fault handling code to signal to the unwinder code that it
* should switch to a different unwinder.
*/
extern int unwinder_faulted;
#endif /* _LINUX_UNWINDER_H */
......@@ -19,6 +19,10 @@
#if !defined(CONFIG_SH_STANDARD_BIOS)
#define sh_bios_handler debug_trap_handler
#endif
#if !defined(CONFIG_DWARF_UNWINDER)
#define unwinder_trap_handler debug_trap_handler
#endif
.data
......@@ -35,7 +39,7 @@ ENTRY(debug_trap_table)
.long debug_trap_handler /* 0x38 */
.long debug_trap_handler /* 0x39 */
.long debug_trap_handler /* 0x3a */
.long debug_trap_handler /* 0x3b */
.long unwinder_trap_handler /* 0x3b */
.long breakpoint_trap_handler /* 0x3c */
.long singlestep_trap_handler /* 0x3d */
.long bug_trap_handler /* 0x3e */
......
......@@ -69,7 +69,7 @@ static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
* Let's just bomb hard here, we have no way to
* gracefully recover.
*/
BUG();
UNWINDER_BUG();
}
reg->number = reg_num;
......@@ -232,7 +232,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
break;
default:
pr_debug("encoding=0x%x\n", (encoding & 0x70));
BUG();
UNWINDER_BUG();
}
if ((encoding & 0x07) == 0x00)
......@@ -247,7 +247,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
break;
default:
pr_debug("encoding=0x%x\n", encoding);
BUG();
UNWINDER_BUG();
}
return count;
......@@ -519,6 +519,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
break;
default:
pr_debug("unhandled DWARF instruction 0x%x\n", insn);
UNWINDER_BUG();
break;
}
}
......@@ -535,8 +536,8 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
* on the callstack. Each of the lower (older) stack frames are
* linked via the "prev" member.
*/
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
struct dwarf_frame *prev)
struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
struct dwarf_frame *prev)
{
struct dwarf_frame *frame;
struct dwarf_cie *cie;
......@@ -558,7 +559,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
if (!frame) {
printk(KERN_ERR "Unable to allocate a dwarf frame\n");
BUG();
UNWINDER_BUG();
}
INIT_LIST_HEAD(&frame->reg_list);
......@@ -605,7 +606,8 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
case DWARF_FRAME_CFA_REG_OFFSET:
if (prev) {
reg = dwarf_frame_reg(prev, frame->cfa_register);
BUG_ON(!reg);
UNWINDER_BUG_ON(!reg);
UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
addr = prev->cfa + reg->addr;
frame->cfa = __raw_readl(addr);
......@@ -624,12 +626,13 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
frame->cfa += frame->cfa_offset;
break;
default:
BUG();
UNWINDER_BUG();
}
/* If we haven't seen the return address reg, we're screwed. */
reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
BUG_ON(!reg);
UNWINDER_BUG_ON(!reg);
UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
addr = frame->cfa + reg->addr;
frame->return_addr = __raw_readl(addr);
......@@ -664,7 +667,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
cie->cie_pointer = (unsigned long)entry;
cie->version = *(char *)p++;
BUG_ON(cie->version != 1);
UNWINDER_BUG_ON(cie->version != 1);
cie->augmentation = p;
p += strlen(cie->augmentation) + 1;
......@@ -694,7 +697,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
count = dwarf_read_uleb128(p, &length);
p += count;
BUG_ON((unsigned char *)p > end);
UNWINDER_BUG_ON((unsigned char *)p > end);
cie->initial_instructions = p + length;
cie->augmentation++;
......@@ -722,16 +725,16 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
* routine in the CIE
* augmentation.
*/
BUG();
UNWINDER_BUG();
} else if (*cie->augmentation == 'S') {
BUG();
UNWINDER_BUG();
} else {
/*
* Unknown augmentation. Assume
* 'z' augmentation.
*/
p = cie->initial_instructions;
BUG_ON(!p);
UNWINDER_BUG_ON(!p);
break;
}
}
......@@ -805,9 +808,11 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
return 0;
}
static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
static void dwarf_unwinder_dump(struct task_struct *task,
struct pt_regs *regs,
unsigned long *sp,
const struct stacktrace_ops *ops, void *data)
const struct stacktrace_ops *ops,
void *data)
{
struct dwarf_frame *frame, *_frame;
unsigned long return_addr;
......@@ -831,7 +836,6 @@ static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
return_addr = frame->return_addr;
ops->address(data, return_addr, 1);
}
}
static struct unwinder dwarf_unwinder = {
......
......@@ -8,7 +8,7 @@
#include <asm/system.h>
#ifdef CONFIG_BUG
static void handle_BUG(struct pt_regs *regs)
void handle_BUG(struct pt_regs *regs)
{
enum bug_trap_type tt;
tt = report_bug(regs->pc, regs);
......@@ -29,7 +29,10 @@ int is_valid_bugaddr(unsigned long addr)
if (probe_kernel_address((insn_size_t *)addr, opcode))
return 0;
return opcode == TRAPA_BUG_OPCODE;
if (opcode == TRAPA_BUG_OPCODE || opcode == TRAPA_UNWINDER_BUG_OPCODE)
return 1;
return 0;
}
#endif
......
......@@ -136,6 +136,7 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
......
......@@ -53,8 +53,6 @@ static struct list_head unwinder_list = {
static DEFINE_SPINLOCK(unwinder_lock);
static atomic_t unwinder_running = ATOMIC_INIT(0);
/**
* select_unwinder - Select the best registered stack unwinder.
*
......@@ -122,6 +120,8 @@ int unwinder_register(struct unwinder *u)
return ret;
}
int unwinder_faulted = 0;
/*
* Unwind the call stack and pass information to the stacktrace_ops
* functions. Also handle the case where we need to switch to a new
......@@ -144,19 +144,40 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs,
* Hopefully this will give us a semi-reliable stacktrace so we
* can diagnose why curr_unwinder->dump() faulted.
*/
if (atomic_inc_return(&unwinder_running) != 1) {
if (unwinder_faulted) {
spin_lock_irqsave(&unwinder_lock, flags);
if (!list_is_singular(&unwinder_list)) {
/* Make sure no one beat us to changing the unwinder */
if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
list_del(&curr_unwinder->list);
curr_unwinder = select_unwinder();
unwinder_faulted = 0;
}
spin_unlock_irqrestore(&unwinder_lock, flags);
atomic_dec(&unwinder_running);
}
curr_unwinder->dump(task, regs, sp, ops, data);
}
/*
* Trap handler for UNWINDER_BUG() statements. We must switch to the
* unwinder with the next highest rating.
*/
BUILD_TRAP_HANDLER(unwinder)
{
insn_size_t insn;
TRAP_HANDLER_DECL;
/* Rewind */
regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
insn = *(insn_size_t *)instruction_pointer(regs);
/* Switch unwinders when unwind_stack() is called */
unwinder_faulted = 1;
atomic_dec(&unwinder_running);
#ifdef CONFIG_BUG
handle_BUG(regs);
#endif
}
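
To make the new control flow concrete, the following is a small stand-alone user-space sketch (plain C, a simulation rather than kernel code) of the fallback policy that unwind_stack() now implements: the UNWINDER_BUG() trap handler sets unwinder_faulted, and the next call to unwind_stack() downgrades to the next-best registered unwinder. Names mirror the kernel code above but everything here is illustrative:

/* Stand-alone illustration of the fallback policy; not kernel code. */
#include <stdio.h>

struct unwinder {
        const char *name;
        int rating;
};

/* Registered unwinders, best rating first (as select_unwinder() would pick). */
static struct unwinder unwinders[] = {
        { "dwarf unwinder", 150 },
        { "stack unwinder",  50 },
};

static unsigned int curr;       /* currently selected unwinder */
static int unwinder_faulted;    /* set by the simulated trap handler */

static void unwinder_trap_handler(void)
{
        /* What BUILD_TRAP_HANDLER(unwinder) does: flag the fault and return. */
        unwinder_faulted = 1;
}

static void unwind_stack(void)
{
        /* Mirror of the new logic: only downgrade after a reported fault,
         * and only while a lower-rated unwinder is still available. */
        if (unwinder_faulted &&
            curr + 1 < sizeof(unwinders) / sizeof(unwinders[0])) {
                curr++;
                unwinder_faulted = 0;
        }

        printf("unwinding with %s (rating %d)\n",
               unwinders[curr].name, unwinders[curr].rating);
}

int main(void)
{
        unwind_stack();                 /* uses the dwarf unwinder */
        unwinder_trap_handler();        /* simulate UNWINDER_BUG() firing */
        unwind_stack();                 /* falls back to the stack unwinder */
        return 0;
}
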