Commit fa9d3b4d authored by Paul Mundt

Merge branch 'sh/dwarf-unwinder'

Conflicts:
	arch/sh/kernel/cpu/sh3/entry.S
@@ -2,6 +2,7 @@
#define __ASM_SH_BUG_H
#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
#define BUGFLAG_UNWINDER (1 << 1)
#ifdef CONFIG_GENERIC_BUG
#define HAVE_ARCH_BUG
@@ -72,6 +73,36 @@ do { \
unlikely(__ret_warn_on); \
})
#define UNWINDER_BUG() \
do { \
__asm__ __volatile__ ( \
"1:\t.short %O0\n" \
_EMIT_BUG_ENTRY \
: \
: "n" (TRAPA_BUG_OPCODE), \
"i" (__FILE__), \
"i" (__LINE__), \
"i" (BUGFLAG_UNWINDER), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#define UNWINDER_BUG_ON(x) ({ \
int __ret_unwinder_on = !!(x); \
if (__builtin_constant_p(__ret_unwinder_on)) { \
if (__ret_unwinder_on) \
UNWINDER_BUG(); \
} else { \
if (unlikely(__ret_unwinder_on)) \
UNWINDER_BUG(); \
} \
unlikely(__ret_unwinder_on); \
})
#else
#define UNWINDER_BUG BUG
#define UNWINDER_BUG_ON BUG_ON
#endif /* CONFIG_GENERIC_BUG */
#include <asm-generic/bug.h>
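For context, a minimal usage sketch of the new macro (the caller below is invented for illustration): with CONFIG_GENERIC_BUG set it emits the trapa #0x3e opcode plus a bug_entry tagged BUGFLAG_UNWINDER, otherwise it simply falls back to BUG_ON().

```c
/* Hypothetical caller, for illustration only (assumes <asm/bug.h> context). */
static unsigned long read_return_addr(unsigned long *slot)
{
	/* Trip the unwinder-specific trap so the fault handler can switch
	 * to a fallback unwinder instead of killing the box outright. */
	UNWINDER_BUG_ON(slot == NULL);
	return *slot;
}
```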
@@ -265,10 +265,7 @@ struct dwarf_frame {
unsigned long pc;
struct dwarf_reg *regs;
unsigned int num_regs; /* how many regs are allocated? */
unsigned int depth; /* what level are we in the callstack? */
struct list_head reg_list;
unsigned long cfa;
@@ -292,20 +289,15 @@ struct dwarf_frame {
* @flags: Describes how to calculate the value of this register
*/
struct dwarf_reg {
struct list_head link;
unsigned int number;
unsigned long addr;
unsigned long flags;
#define DWARF_REG_OFFSET (1 << 0)
};
/**
* dwarf_stack - a DWARF stack contains a collection of DWARF frames
* @depth: the number of frames in the stack
* @level: an array of DWARF frames, indexed by stack level
*
*/
struct dwarf_stack {
unsigned int depth;
struct dwarf_frame **level;
#define DWARF_VAL_OFFSET (1 << 1)
#define DWARF_UNDEFINED (1 << 2)
};
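For reference, a sketch of how the three rule flags above are conventionally interpreted once a frame's CFA is known; the helper is invented for illustration and is not part of the patch (which, per the TODO in dwarf.c below, does not yet handle DWARF_VAL_OFFSET rules).

```c
/* Illustrative only: recover a register value from its DWARF rule. */
static unsigned long dwarf_reg_value(struct dwarf_reg *reg, unsigned long cfa)
{
	if (reg->flags & DWARF_REG_OFFSET)
		return __raw_readl(cfa + reg->addr);	/* saved in memory at CFA + offset */
	if (reg->flags & DWARF_VAL_OFFSET)
		return cfa + reg->addr;			/* the value is CFA + offset itself */
	return 0;					/* DWARF_UNDEFINED: not recoverable */
}
```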
/*
@@ -370,17 +362,16 @@ static inline unsigned int DW_CFA_operand(unsigned long insn)
#define DW_EXT_HI 0xffffffff
#define DW_EXT_DWARF64 DW_EXT_HI
extern void dwarf_unwinder_init(void);
extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
struct dwarf_frame *);
#endif /* __ASSEMBLY__ */
#endif /* !__ASSEMBLY__ */
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_REGISTER .cfi_register
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_UNDEFINED .cfi_undefined
#else
@@ -394,6 +385,7 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
#define CFI_DEF_CFA CFI_IGNORE
#define CFI_REGISTER CFI_IGNORE
#define CFI_REL_OFFSET CFI_IGNORE
#define CFI_UNDEFINED CFI_IGNORE
#ifndef __ASSEMBLY__
static inline void dwarf_unwinder_init(void)
@@ -181,6 +181,11 @@ BUILD_TRAP_HANDLER(breakpoint);
BUILD_TRAP_HANDLER(singlestep);
BUILD_TRAP_HANDLER(fpu_error);
BUILD_TRAP_HANDLER(fpu_state_restore);
BUILD_TRAP_HANDLER(unwinder);
#ifdef CONFIG_BUG
extern void handle_BUG(struct pt_regs *);
#endif
#define arch_align_stack(x) (x)
@@ -22,4 +22,10 @@ extern void stack_reader_dump(struct task_struct *, struct pt_regs *,
unsigned long *, const struct stacktrace_ops *,
void *);
/*
* Used by fault handling code to signal to the unwinder code that it
* should switch to a different unwinder.
*/
extern int unwinder_faulted;
#endif /* _LINUX_UNWINDER_H */
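To make the protocol concrete, a condensed sketch of the handshake (paraphrasing traps_32.c and unwinder.c later in this diff; the function names are invented):

```c
#include <linux/bug.h>

extern int unwinder_faulted;

/* In handle_BUG(): the faulting bug_entry was emitted by UNWINDER_BUG(). */
static void example_note_unwinder_fault(const struct bug_entry *bug)
{
	if (bug->flags & BUGFLAG_UNWINDER)
		unwinder_faulted = 1;
}

/* In unwind_stack(): retire the broken unwinder before dumping again. */
static void example_retire_faulted_unwinder(void)
{
	if (unwinder_faulted) {
		/* list_del(&curr_unwinder->list);
		 * curr_unwinder = select_unwinder(); */
		unwinder_faulted = 0;
	}
}
```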
@@ -508,6 +508,8 @@ ENTRY(handle_interrupt)
bsr save_regs ! needs original pr value in k3
mov #-1, k2 ! default vector kept in k2
setup_frame_reg
stc sr, r0 ! get status register
shlr2 r0
and #0x3c, r0
@@ -11,12 +11,14 @@
*
* TODO:
* - DWARF64 doesn't work.
* - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
*/
/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
@@ -25,55 +27,89 @@
#include <asm/dwarf.h>
#include <asm/stacktrace.h>
/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ 2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4)
static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;
static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;
static LIST_HEAD(dwarf_cie_list);
DEFINE_SPINLOCK(dwarf_cie_lock);
static DEFINE_SPINLOCK(dwarf_cie_lock);
static LIST_HEAD(dwarf_fde_list);
DEFINE_SPINLOCK(dwarf_fde_lock);
static DEFINE_SPINLOCK(dwarf_fde_lock);
static struct dwarf_cie *cached_cie;
/*
* Figure out whether we need to allocate some dwarf registers. If dwarf
* registers have already been allocated then we may need to realloc
* them. "reg" is a register number that we need to be able to access
* after this call.
/**
* dwarf_frame_alloc_reg - allocate memory for a DWARF register
* @frame: the DWARF frame whose register list we insert onto
* @reg_num: the register number
*
* Allocate space for, and initialise, a dwarf reg from
* dwarf_reg_pool and insert it onto the (unsorted) linked-list of
* dwarf registers for @frame.
*
* Register numbers start at zero, therefore we need to allocate space
* for "reg" + 1 registers.
* Return the initialised DWARF reg.
*/
static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
unsigned int reg)
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
unsigned int reg_num)
{
struct dwarf_reg *regs;
unsigned int num_regs = reg + 1;
size_t new_size;
size_t old_size;
new_size = num_regs * sizeof(*regs);
old_size = frame->num_regs * sizeof(*regs);
struct dwarf_reg *reg;
/* Fast path: don't allocate any regs if we've already got enough. */
if (frame->num_regs >= num_regs)
return;
regs = kzalloc(new_size, GFP_ATOMIC);
if (!regs) {
printk(KERN_WARNING "Unable to allocate DWARF registers\n");
reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
if (!reg) {
printk(KERN_WARNING "Unable to allocate a DWARF register\n");
/*
* Let's just bomb hard here, we have no way to
* gracefully recover.
*/
BUG();
UNWINDER_BUG();
}
if (frame->regs) {
memcpy(regs, frame->regs, old_size);
kfree(frame->regs);
reg->number = reg_num;
reg->addr = 0;
reg->flags = 0;
list_add(&reg->link, &frame->reg_list);
return reg;
}
static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
struct dwarf_reg *reg, *n;
list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
list_del(&reg->link);
mempool_free(reg, dwarf_reg_pool);
}
}
frame->regs = regs;
frame->num_regs = num_regs;
/**
* dwarf_frame_reg - return a DWARF register
* @frame: the DWARF frame to search in for @reg_num
* @reg_num: the register number to search for
*
* Lookup and return the dwarf reg @reg_num for this frame. Return
* NULL if @reg_num is an invalid register number.
*/
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
unsigned int reg_num)
{
struct dwarf_reg *reg;
list_for_each_entry(reg, &frame->reg_list, link) {
if (reg->number == reg_num)
return reg;
}
return NULL;
}
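A brief usage sketch of the two helpers above (illustrative only; register 14 and the offset are arbitrary):

```c
/* Illustrative: record an offset rule for register 14, then look it up. */
static void example_reg_usage(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg;

	reg = dwarf_frame_alloc_reg(frame, 14);	/* linked onto frame->reg_list */
	reg->flags |= DWARF_REG_OFFSET;
	reg->addr = -4;				/* i.e. saved at CFA - 4 */

	reg = dwarf_frame_reg(frame, 14);	/* linear scan of reg_list */
	if (reg && (reg->flags & DWARF_REG_OFFSET))
		pr_debug("r14 saved at CFA%+ld\n", (long)reg->addr);
}
```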
/**
@@ -196,7 +232,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
break;
default:
pr_debug("encoding=0x%x\n", (encoding & 0x70));
BUG();
UNWINDER_BUG();
}
if ((encoding & 0x07) == 0x00)
@@ -211,7 +247,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
break;
default:
pr_debug("encoding=0x%x\n", encoding);
BUG();
UNWINDER_BUG();
}
return count;
@@ -264,7 +300,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
*/
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
struct dwarf_cie *cie, *n;
struct dwarf_cie *cie;
unsigned long flags;
spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -278,7 +314,7 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
goto out;
}
list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) {
list_for_each_entry(cie, &dwarf_cie_list, link) {
if (cie->cie_pointer == cie_ptr) {
cached_cie = cie;
break;
@@ -299,11 +335,12 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
*/
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
struct dwarf_fde *fde;
unsigned long flags;
struct dwarf_fde *fde, *n;
spin_lock_irqsave(&dwarf_fde_lock, flags);
list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) {
list_for_each_entry(fde, &dwarf_fde_list, link) {
unsigned long start, end;
start = fde->initial_location;
@@ -346,6 +383,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
unsigned char insn;
unsigned char *current_insn;
unsigned int count, delta, reg, expr_len, offset;
struct dwarf_reg *regp;
current_insn = insn_start;
@@ -368,9 +406,9 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
count = dwarf_read_uleb128(current_insn, &offset);
current_insn += count;
offset *= cie->data_alignment_factor;
dwarf_frame_alloc_regs(frame, reg);
frame->regs[reg].addr = offset;
frame->regs[reg].flags |= DWARF_REG_OFFSET;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->addr = offset;
regp->flags |= DWARF_REG_OFFSET;
continue;
/* NOTREACHED */
case DW_CFA_restore:
@@ -414,6 +452,8 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
case DW_CFA_undefined:
count = dwarf_read_uleb128(current_insn, &reg);
current_insn += count;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->flags |= DWARF_UNDEFINED;
break;
case DW_CFA_def_cfa:
count = dwarf_read_uleb128(current_insn,
@@ -452,17 +492,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
count = dwarf_read_leb128(current_insn, &offset);
current_insn += count;
offset *= cie->data_alignment_factor;
dwarf_frame_alloc_regs(frame, reg);
frame->regs[reg].flags |= DWARF_REG_OFFSET;
frame->regs[reg].addr = offset;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->flags |= DWARF_REG_OFFSET;
regp->addr = offset;
break;
case DW_CFA_val_offset:
count = dwarf_read_uleb128(current_insn, &reg);
current_insn += count;
count = dwarf_read_leb128(current_insn, &offset);
offset *= cie->data_alignment_factor;
frame->regs[reg].flags |= DWARF_REG_OFFSET;
frame->regs[reg].addr = offset;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->flags |= DWARF_VAL_OFFSET;
regp->addr = offset;
break;
case DW_CFA_GNU_args_size:
count = dwarf_read_uleb128(current_insn, &offset);
@@ -473,12 +514,14 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
current_insn += count;
count = dwarf_read_uleb128(current_insn, &offset);
offset *= cie->data_alignment_factor;
dwarf_frame_alloc_regs(frame, reg);
frame->regs[reg].flags |= DWARF_REG_OFFSET;
frame->regs[reg].addr = -offset;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->flags |= DWARF_REG_OFFSET;
regp->addr = -offset;
break;
default:
pr_debug("unhandled DWARF instruction 0x%x\n", insn);
UNWINDER_BUG();
break;
}
}
@@ -495,14 +538,14 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
* on the callstack. Each of the lower (older) stack frames are
* linked via the "prev" member.
*/
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
struct dwarf_frame *prev)
struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
struct dwarf_frame *prev)
{
struct dwarf_frame *frame;
struct dwarf_cie *cie;
struct dwarf_fde *fde;
struct dwarf_reg *reg;
unsigned long addr;
int i, offset;
/*
* If this is the first invocation of this recursive function we
@@ -515,11 +558,16 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
if (!pc && !prev)
pc = (unsigned long)current_text_addr();
frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
if (!frame)
return NULL;
frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
if (!frame) {
printk(KERN_ERR "Unable to allocate a dwarf frame\n");
UNWINDER_BUG();
}
INIT_LIST_HEAD(&frame->reg_list);
frame->flags = 0;
frame->prev = prev;
frame->return_addr = 0;
fde = dwarf_lookup_fde(pc);
if (!fde) {
@@ -539,7 +587,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
* case above, which sucks because we could print a
* warning here.
*/
return NULL;
goto bail;
}
cie = dwarf_lookup_cie(fde->cie_pointer);
@@ -559,10 +607,11 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
switch (frame->flags) {
case DWARF_FRAME_CFA_REG_OFFSET:
if (prev) {
BUG_ON(!prev->regs[frame->cfa_register].flags);
reg = dwarf_frame_reg(prev, frame->cfa_register);
UNWINDER_BUG_ON(!reg);
UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
addr = prev->cfa;
addr += prev->regs[frame->cfa_register].addr;
addr = prev->cfa + reg->addr;
frame->cfa = __raw_readl(addr);
} else {
@@ -579,27 +628,30 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
frame->cfa += frame->cfa_offset;
break;
default:
BUG();
UNWINDER_BUG();
}
/* If we haven't seen the return address reg, we're screwed. */
BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
for (i = 0; i <= frame->num_regs; i++) {
struct dwarf_reg *reg = &frame->regs[i];
/*
* If we haven't seen the return address register or the return
* address column is undefined then we must assume that this is
* the end of the callstack.
*/
if (!reg || reg->flags == DWARF_UNDEFINED)
goto bail;
if (!reg->flags)
continue;
UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
offset = reg->addr;
offset += frame->cfa;
}
addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
addr = frame->cfa + reg->addr;
frame->return_addr = __raw_readl(addr);
frame->next = dwarf_unwind_stack(frame->return_addr, frame);
return frame;
bail:
dwarf_frame_free_regs(frame);
mempool_free(frame, dwarf_frame_pool);
return NULL;
}
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
@@ -624,7 +676,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
cie->cie_pointer = (unsigned long)entry;
cie->version = *(char *)p++;
BUG_ON(cie->version != 1);
UNWINDER_BUG_ON(cie->version != 1);
cie->augmentation = p;
p += strlen(cie->augmentation) + 1;
@@ -654,7 +706,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
count = dwarf_read_uleb128(p, &length);
p += count;
BUG_ON((unsigned char *)p > end);
UNWINDER_BUG_ON((unsigned char *)p > end);
cie->initial_instructions = p + length;
cie->augmentation++;
@@ -682,16 +734,16 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
* routine in the CIE
* augmentation.
*/
BUG();
UNWINDER_BUG();
} else if (*cie->augmentation == 'S') {
BUG();
UNWINDER_BUG();
} else {
/*
* Unknown augmentation. Assume
* 'z' augmentation.
*/
p = cie->initial_instructions;
BUG_ON(!p);
UNWINDER_BUG_ON(!p);
break;
}
}
@@ -708,7 +760,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
}
static int dwarf_parse_fde(void *entry, u32 entry_type,
void *start, unsigned long len)
void *start, unsigned long len,
unsigned char *end)
{
struct dwarf_fde *fde;
struct dwarf_cie *cie;
@@ -755,7 +808,7 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
/* Call frame instructions. */
fde->instructions = p;
fde->end = start + len;
fde->end = end;
/* Add to list. */
spin_lock_irqsave(&dwarf_fde_lock, flags);
@@ -765,17 +818,33 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
return 0;
}
static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
static void dwarf_unwinder_dump(struct task_struct *task,
struct pt_regs *regs,
unsigned long *sp,
const struct stacktrace_ops *ops, void *data)
const struct stacktrace_ops *ops,
void *data)
{
struct dwarf_frame *frame;
struct dwarf_frame *frame, *_frame;
unsigned long return_addr;
_frame = NULL;
return_addr = 0;
frame = dwarf_unwind_stack(0, NULL);
while (1) {
frame = dwarf_unwind_stack(return_addr, _frame);
while (frame && frame->return_addr) {
ops->address(data, frame->return_addr, 1);
frame = frame->next;
if (_frame) {
dwarf_frame_free_regs(_frame);
mempool_free(_frame, dwarf_frame_pool);
}
_frame = frame;
if (!frame || !frame->return_addr)
break;
return_addr = frame->return_addr;
ops->address(data, return_addr, 1);
}
}
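Because the old and new bodies of dwarf_unwinder_dump() are interleaved above without +/- markers, here is the new iteration pattern pulled out for readability (reconstructed from the added lines; a reading aid, not authoritative):

```c
	_frame = NULL;
	return_addr = 0;

	while (1) {
		frame = dwarf_unwind_stack(return_addr, _frame);

		/* The previous frame is finished with: release its register
		 * list and return the frame itself to the mempool. */
		if (_frame) {
			dwarf_frame_free_regs(_frame);
			mempool_free(_frame, dwarf_frame_pool);
		}

		_frame = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		ops->address(data, return_addr, 1);
	}
```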
@@ -787,24 +856,22 @@ static struct unwinder dwarf_unwinder = {
static void dwarf_unwinder_cleanup(void)
{
struct dwarf_cie *cie, *m;
struct dwarf_fde *fde, *n;
unsigned long flags;
struct dwarf_cie *cie;
struct dwarf_fde *fde;
/*
* Deallocate all the memory allocated for the DWARF unwinder.
* Traverse all the FDE/CIE lists and remove and free all the
* memory associated with those data structures.
*/
spin_lock_irqsave(&dwarf_cie_lock, flags);
list_for_each_entry_safe(cie, m, &dwarf_cie_list, link)
list_for_each_entry(cie, &dwarf_cie_list, link)
kfree(cie);
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
spin_lock_irqsave(&dwarf_fde_lock, flags);
list_for_each_entry_safe(fde, n, &dwarf_fde_list, link)
list_for_each_entry(fde, &dwarf_fde_list, link)
kfree(fde);
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
kmem_cache_destroy(dwarf_reg_cachep);
kmem_cache_destroy(dwarf_frame_cachep);
}
/**
@@ -816,7 +883,7 @@ static void dwarf_unwinder_cleanup(void)
* easy to lookup the FDE for a given PC, so we build a list of FDE
* and CIE entries that make it easier.
*/
void dwarf_unwinder_init(void)
static int __init dwarf_unwinder_init(void)
{
u32 entry_type;
void *p, *entry;
@@ -831,6 +898,21 @@ void dwarf_unwinder_init(void)
f_entries = 0;
entry = &__start_eh_frame;
dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL);
dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL);
dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
mempool_alloc_slab,
mempool_free_slab,
dwarf_frame_cachep);
dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
mempool_alloc_slab,
mempool_free_slab,
dwarf_reg_cachep);
while ((char *)entry < __stop_eh_frame) {
p = entry;
@@ -860,7 +942,7 @@ void dwarf_unwinder_init(void)
else
c_entries++;
} else {
err = dwarf_parse_fde(entry, entry_type, p, len);
err = dwarf_parse_fde(entry, entry_type, p, len, end);
if (err < 0)
goto out;
else
@@ -877,9 +959,11 @@ void dwarf_unwinder_init(void)
if (err)
goto out;
return;
return 0;
out:
printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
dwarf_unwinder_cleanup();
return -EINVAL;
}
early_initcall(dwarf_unwinder_init);
@@ -14,7 +14,6 @@
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/dwarf.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
@@ -262,9 +261,6 @@ void __init init_IRQ(void)
sh_mv.mv_init_irq();
irq_ctx_init(smp_processor_id());
/* This needs to be early, but not too early.. */
dwarf_unwinder_init();
}
#ifdef CONFIG_SPARSE_IRQ
@@ -5,18 +5,32 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/unwinder.h>
#include <asm/system.h>
#ifdef CONFIG_BUG
static void handle_BUG(struct pt_regs *regs)
void handle_BUG(struct pt_regs *regs)
{
const struct bug_entry *bug;
unsigned long bugaddr = regs->pc;
enum bug_trap_type tt;
tt = report_bug(regs->pc, regs);
if (!is_valid_bugaddr(bugaddr))
goto invalid;
bug = find_bug(bugaddr);
/* Switch unwinders when unwind_stack() is called */
if (bug->flags & BUGFLAG_UNWINDER)
unwinder_faulted = 1;
tt = report_bug(bugaddr, regs);
if (tt == BUG_TRAP_TYPE_WARN) {
regs->pc += instruction_size(regs->pc);
regs->pc += instruction_size(bugaddr);
return;
}
invalid:
die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
}
@@ -28,8 +42,10 @@ int is_valid_bugaddr(unsigned long addr)
return 0;
if (probe_kernel_address((insn_size_t *)addr, opcode))
return 0;
if (opcode == TRAPA_BUG_OPCODE)
return 1;
return opcode == TRAPA_BUG_OPCODE;
return 0;
}
#endif
@@ -136,6 +136,7 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/atomic.h>
@@ -53,8 +54,6 @@ static struct list_head unwinder_list = {
static DEFINE_SPINLOCK(unwinder_lock);
static atomic_t unwinder_running = ATOMIC_INIT(0);
/**
* select_unwinder - Select the best registered stack unwinder.
*
@@ -122,6 +121,8 @@ int unwinder_register(struct unwinder *u)
return ret;
}
int unwinder_faulted = 0;
/*
* Unwind the call stack and pass information to the stacktrace_ops
* functions. Also handle the case where we need to switch to a new
@@ -144,19 +145,20 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs,
* Hopefully this will give us a semi-reliable stacktrace so we
* can diagnose why curr_unwinder->dump() faulted.
*/
if (atomic_inc_return(&unwinder_running) != 1) {
if (unwinder_faulted) {
spin_lock_irqsave(&unwinder_lock, flags);
if (!list_is_singular(&unwinder_list)) {
/* Make sure no one beat us to changing the unwinder */
if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
list_del(&curr_unwinder->list);
curr_unwinder = select_unwinder();
unwinder_faulted = 0;
}
spin_unlock_irqrestore(&unwinder_lock, flags);
atomic_dec(&unwinder_running);
}
curr_unwinder->dump(task, regs, sp, ops, data);
atomic_dec(&unwinder_running);
}
EXPORT_SYMBOL_GPL(unwind_stack);