Commit afbfb52e authored by Paul Mundt

sh: stacktrace/lockdep/irqflags tracing support.

Wire up all of the essentials for lockdep.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent: c03c6961
@@ -51,6 +51,14 @@ config GENERIC_TIME
 config ARCH_MAY_HAVE_PC_FDC
 	bool
 
+config STACKTRACE_SUPPORT
+	bool
+	default y
+
+config LOCKDEP_SUPPORT
+	bool
+	default y
+
 source "init/Kconfig"
 
 menu "System type"
...
 menu "Kernel hacking"
 
+config TRACE_IRQFLAGS_SUPPORT
+	bool
+	default y
+
 source "lib/Kconfig.debug"
 
 config SH_STANDARD_BIOS
...
@@ -21,3 +21,4 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 obj-$(CONFIG_KEXEC)	+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_APM)	+= apm.o
 obj-$(CONFIG_PM)	+= pm.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
@@ -184,6 +184,11 @@ trap_entry:
 	add	r15,r8
 	mov.l	r9,@r8
 	mov	r9,r8
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r9
+	jsr	@r9
+	 nop
+#endif
 	sti
 	bra	system_call
 	 nop
@@ -193,6 +198,9 @@ trap_entry:
 2:	.long	break_point_trap_software
 3:	.long	NR_syscalls
 4:	.long	sys_call_table
+#ifdef CONFIG_TRACE_IRQFLAGS
+5:	.long	trace_hardirqs_on
+#endif
 
 #if defined(CONFIG_SH_STANDARD_BIOS)
 	/* Unwind the stack and jmp to the debug entry */
@@ -255,6 +263,11 @@ ENTRY(address_error_handler)
 restore_all:
 	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	3f, r0
+	jsr	@r0
+	 nop
+#endif
 	mov	r15,r0
 	mov.l	$cpu_mode,r2
 	mov	#OFF_SR,r3
@@ -307,6 +320,9 @@ $current_thread_info:
 	.long	__current_thread_info
 $cpu_mode:
 	.long	__cpu_mode
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:	.long	trace_hardirqs_off
+#endif
 
 ! common exception handler
 #include "../../entry-common.S"
...
@@ -140,7 +140,7 @@ call_dpf:
 	mov.l	1f, r0
 	mov.l	@r0, r6	! address
 	mov.l	3f, r0
-	sti
 	jmp	@r0
 	 mov	r15, r4	! regs
...
@@ -100,6 +100,11 @@ debug_trap:
 	.align	2
 ENTRY(exception_error)
 	!
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	3f, r0
+	jsr	@r0
+	 nop
+#endif
 	sti
 	mov.l	2f, r0
 	jmp	@r0
@@ -109,10 +114,18 @@ ENTRY(exception_error)
 	.align	2
 1:	.long	break_point_trap_software
 2:	.long	do_exception_error
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:	.long	trace_hardirqs_on
+#endif
 
 	.align	2
 ret_from_exception:
 	preempt_stop()
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	4f, r0
+	jsr	@r0
+	 nop
+#endif
 ENTRY(ret_from_irq)
 	!
 	mov	#OFF_SR, r0
@@ -143,6 +156,11 @@ need_resched:
 	mov.l	1f, r0
 	mov.l	r0, @(TI_PRE_COUNT,r8)
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	3f, r0
+	jsr	@r0
+	 nop
+#endif
 	sti
 	mov.l	2f, r0
 	jsr	@r0
@@ -150,9 +168,15 @@ need_resched:
 	mov	#0, r0
 	mov.l	r0, @(TI_PRE_COUNT,r8)
 	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	4f, r0
+	jsr	@r0
+	 nop
+#endif
 
 	bra	need_resched
 	 nop
 
 noresched:
 	bra	__restore_all
 	 nop
@@ -160,11 +184,20 @@ noresched:
 	.align	2
 1:	.long	PREEMPT_ACTIVE
 2:	.long	schedule
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:	.long	trace_hardirqs_on
+4:	.long	trace_hardirqs_off
+#endif
 #endif
 
 ENTRY(resume_userspace)
 	! r8: current_thread_info
 	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r0
+	jsr	@r0
+	 nop
+#endif
 	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
 	tst	#_TIF_WORK_MASK, r0
 	bt/s	__restore_all
@@ -210,6 +243,11 @@ work_resched:
 	jsr	@r1	! schedule
 	 nop
 	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r0
+	jsr	@r0
+	 nop
+#endif
 	!
 	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
 	tst	#_TIF_WORK_MASK, r0
@@ -221,6 +259,10 @@ work_resched:
 1:	.long	schedule
 2:	.long	do_notify_resume
 3:	.long	restore_all
+#ifdef CONFIG_TRACE_IRQFLAGS
+4:	.long	trace_hardirqs_on
+5:	.long	trace_hardirqs_off
+#endif
 
 	.align	2
 syscall_exit_work:
@@ -229,6 +271,11 @@ syscall_exit_work:
 	tst	#_TIF_SYSCALL_TRACE, r0
 	bt/s	work_pending
 	 tst	#_TIF_NEED_RESCHED, r0
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r0
+	jsr	@r0
+	 nop
+#endif
 	sti
 	! XXX setup arguments...
 	mov.l	4f, r0	! do_syscall_trace
@@ -265,7 +312,7 @@ syscall_trace_entry:
 	mov.l	r0, @(OFF_R0,r15)	! Return value
 
 __restore_all:
-	mov.l	1f,r0
+	mov.l	1f, r0
 	jmp	@r0
 	 nop
@@ -331,7 +378,13 @@ ENTRY(system_call)
 	mov	#OFF_TRA, r9
 	add	r15, r9
 	mov.l	r8, @r9	! set TRA value to tra
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r10
+	jsr	@r10
+	 nop
+#endif
 	sti
 	!
 	get_current_thread_info r8, r10
 	mov.l	@(TI_FLAGS,r8), r8
@@ -355,6 +408,11 @@ syscall_call:
 	!
 syscall_exit:
 	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	6f, r0
+	jsr	@r0
+	 nop
+#endif
 	!
 	get_current_thread_info r8, r0
 	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
@@ -369,3 +427,7 @@ syscall_exit:
 2:	.long	NR_syscalls
 3:	.long	sys_call_table
 4:	.long	do_syscall_trace
+#ifdef CONFIG_TRACE_IRQFLAGS
+5:	.long	trace_hardirqs_on
+6:	.long	trace_hardirqs_off
+#endif
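
The ordering used throughout these entry paths follows the generic irqflags-tracing convention: lockdep is told via trace_hardirqs_on() just before interrupts are re-enabled, and via trace_hardirqs_off() just after they are masked. A rough C sketch of that invariant, with made-up helper names that are not part of this patch:

/*
 * Illustrative sketch only: mirrors the cli/sti + trace_hardirqs_*()
 * ordering wired up in the assembly above. example_* names are made up.
 */
#include <linux/irqflags.h>

static inline void example_irq_enable(void)
{
	trace_hardirqs_on();	/* tell lockdep first...               */
	raw_local_irq_enable();	/* ...then actually unmask (the "sti") */
}

static inline void example_irq_disable(void)
{
	raw_local_irq_disable();	/* mask first (the "cli")...     */
	trace_hardirqs_off();		/* ...then record it for lockdep */
}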
/*
* arch/sh/kernel/stacktrace.c
*
* Stack trace management functions
*
* Copyright (C) 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
{
	unsigned long *sp;

	if (!task)
		task = current;
	if (task == current)
		sp = (unsigned long *)current_stack_pointer;
	else
		sp = (unsigned long *)task->thread.sp;

	while (!kstack_end(sp)) {
		unsigned long addr = *sp++;

		if (__kernel_text_address(addr)) {
			if (trace->skip > 0)
				trace->skip--;
			else
				trace->entries[trace->nr_entries++] = addr;
			if (trace->nr_entries >= trace->max_entries)
				break;
		}
	}
}
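
For reference, a minimal, hypothetical caller of the interface above (this is the 2006-era two-argument save_stack_trace(); the buffer depth and skip count below are arbitrary, and the example_* names are not part of the patch):

/* Hypothetical usage sketch. */
#include <linux/stacktrace.h>

#define EXAMPLE_DEPTH	16	/* arbitrary backtrace depth */

static unsigned long example_entries[EXAMPLE_DEPTH];

static void example_dump_current_stack(void)
{
	struct stack_trace trace = {
		.entries	= example_entries,
		.max_entries	= EXAMPLE_DEPTH,
		.nr_entries	= 0,
		.skip		= 0,	/* keep every kernel-text frame */
	};

	/* Passing NULL (or current) walks the running task's stack. */
	save_stack_trace(&trace, NULL);
}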
@@ -37,6 +37,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	int si_code;
 	siginfo_t info;
 
+	trace_hardirqs_on();
+	local_irq_enable();
+
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
 		kgdb_bus_err_hook();
...
#ifndef __ASM_SH_IRQFLAGS_H
#define __ASM_SH_IRQFLAGS_H

static inline void raw_local_irq_enable(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc sr, %0\n\t"
		"and %1, %0\n\t"
#ifdef CONFIG_CPU_HAS_SR_RB
		"stc r6_bank, %1\n\t"
		"or %1, %0\n\t"
#endif
		"ldc %0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "1" (~0x000000f0)
		: "memory"
	);
}

static inline void raw_local_irq_disable(void)
{
	unsigned long flags;

	__asm__ __volatile__ (
		"stc sr, %0\n\t"
		"or #0xf0, %0\n\t"
		"ldc %0, sr\n\t"
		: "=&z" (flags)
		: /* no inputs */
		: "memory"
	);
}

static inline void set_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc sr, %0\n\t"
		"or %2, %0\n\t"
		"and %3, %0\n\t"
		"ldc %0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "r" (0x10000000), "r" (0xffffff0f)
		: "memory"
	);
}

static inline void clear_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc sr, %0\n\t"
		"and %2, %0\n\t"
		"ldc %0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "1" (~0x10000000)
		: "memory"
	);
}

static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__ (
		"stc sr, %0\n\t"
		"and #0xf0, %0\n\t"
		: "=&z" (flags)
		: /* no inputs */
		: "memory"
	);

	return flags;
}

#define raw_local_save_flags(flags) \
	do { (flags) = __raw_local_save_flags(); } while (0)

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
	return (flags != 0);
}

static inline int raw_irqs_disabled(void)
{
	unsigned long flags = __raw_local_save_flags();

	return raw_irqs_disabled_flags(flags);
}

static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long flags, __dummy;

	__asm__ __volatile__ (
		"stc sr, %1\n\t"
		"mov %1, %0\n\t"
		"or #0xf0, %0\n\t"
		"ldc %0, sr\n\t"
		"mov %1, %0\n\t"
		"and #0xf0, %0\n\t"
		: "=&z" (flags), "=&r" (__dummy)
		: /* no inputs */
		: "memory"
	);

	return flags;
}

#define raw_local_irq_save(flags) \
	do { (flags) = __raw_local_irq_save(); } while (0)

static inline void raw_local_irq_restore(unsigned long flags)
{
	if ((flags & 0xf0) != 0xf0)
		raw_local_irq_enable();
}

#endif /* __ASM_SH_IRQFLAGS_H */
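
As a usage note, the save/restore pair defined here is what the generic irq-safe paths ultimately sit on. A minimal sketch, assuming <linux/irqflags.h> pulls these raw_* helpers in; the function and counter names are made up and not part of the patch:

/* Hypothetical usage sketch. */
#include <linux/irqflags.h>

static int example_counter;	/* made-up state shared with an IRQ handler */

static void example_bump(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* mask IRQs, remember the previous IMASK */
	example_counter++;
	raw_local_irq_restore(flags);	/* re-enable only if they were enabled before */
}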
@@ -25,11 +25,21 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname)	, .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
 	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-	LIST_HEAD_INIT((name).wait_list) }
+	LIST_HEAD_INIT((name).wait_list) \
+	__RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -39,6 +49,16 @@ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+			 struct lock_class_key *key);
+
+#define init_rwsem(sem)				\
+do {						\
+	static struct lock_class_key __key;	\
+						\
+	__init_rwsem((sem), #sem, &__key);	\
+} while (0)
+
 static inline void init_rwsem(struct rw_semaphore *sem)
 {
 	sem->count = RWSEM_UNLOCKED_VALUE;
@@ -141,6 +161,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	rwsem_downgrade_wake(sem);
 }
 
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+	__down_write(sem);
+}
+
 /*
  * implement exchange and add functionality
  */
...
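
For reference, a small, hypothetical user of the updated initializer: with CONFIG_DEBUG_LOCK_ALLOC the dep_map added above gives lockdep a lock class named after the semaphore. The example_* names below are made up and not part of the patch:

/* Hypothetical usage sketch. */
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);	/* dep_map.name becomes "example_sem" */

static void example_read_side(void)
{
	down_read(&example_sem);	/* acquisition is tracked via the dep_map */
	/* ... read shared state ... */
	up_read(&example_sem);
}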
@@ -6,6 +6,7 @@
  * Copyright (C) 2002 Paul Mundt
  */
 
+#include <linux/irqflags.h>
 #include <asm/types.h>
 
 /*
@@ -131,103 +132,6 @@ static inline unsigned long tas(volatile int *m)
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 
-/* Interrupt Control */
-#ifdef CONFIG_CPU_HAS_SR_RB
-static inline void local_irq_enable(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__("stc sr, %0\n\t"
-			     "and %1, %0\n\t"
-			     "stc r6_bank, %1\n\t"
-			     "or %1, %0\n\t"
-			     "ldc %0, sr"
-			     : "=&r" (__dummy0), "=r" (__dummy1)
-			     : "1" (~0x000000f0)
-			     : "memory");
-}
-#else
-static inline void local_irq_enable(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__ (
-		"stc sr, %0\n\t"
-		"and %1, %0\n\t"
-		"ldc %0, sr\n\t"
-		: "=&r" (__dummy0), "=r" (__dummy1)
-		: "1" (~0x000000f0)
-		: "memory");
-}
-#endif
-
-static inline void local_irq_disable(void)
-{
-	unsigned long __dummy;
-	__asm__ __volatile__("stc sr, %0\n\t"
-			     "or #0xf0, %0\n\t"
-			     "ldc %0, sr"
-			     : "=&z" (__dummy)
-			     : /* no inputs */
-			     : "memory");
-}
-
-static inline void set_bl_bit(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__ ("stc sr, %0\n\t"
-			      "or %2, %0\n\t"
-			      "and %3, %0\n\t"
-			      "ldc %0, sr"
-			      : "=&r" (__dummy0), "=r" (__dummy1)
-			      : "r" (0x10000000), "r" (0xffffff0f)
-			      : "memory");
-}
-
-static inline void clear_bl_bit(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__ ("stc sr, %0\n\t"
-			      "and %2, %0\n\t"
-			      "ldc %0, sr"
-			      : "=&r" (__dummy0), "=r" (__dummy1)
-			      : "1" (~0x10000000)
-			      : "memory");
-}
-
-#define local_save_flags(x) \
-	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	(flags != 0);			\
-})
-
-static inline unsigned long local_irq_save(void)
-{
-	unsigned long flags, __dummy;
-
-	__asm__ __volatile__("stc sr, %1\n\t"
-			     "mov %1, %0\n\t"
-			     "or #0xf0, %0\n\t"
-			     "ldc %0, sr\n\t"
-			     "mov %1, %0\n\t"
-			     "and #0xf0, %0"
-			     : "=&z" (flags), "=&r" (__dummy)
-			     :/**/
-			     : "memory" );
-	return flags;
-}
-
-#define local_irq_restore(x) do {		\
-	if ((x & 0x000000f0) != 0x000000f0)	\
-		local_irq_enable();		\
-} while (0)
-
 /*
  * Jump to P2 area.
  * When handling TLB or caches, we need to do it from P2 area.
@@ -264,9 +168,6 @@ do { \
 	: "=&r" (__dummy)); \
 } while (0)
 
-/* For spinlocks etc */
-#define local_irq_save(x)	x = local_irq_save()
-
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long flags, retval;
...