提交 758d39eb 编写于 作者: H Heiko Carstens 提交者: Martin Schwidefsky

s390/dumpstack: merge all four stack tracers

We have four different stack tracers of which three had bugs. So it's
time to merge them into a single stack tracer which allows specifying a
callback function that will be called for each step.

This patch changes behavior a bit:

- the "nosched" and "in_sched_functions" check within
  save_stack_trace_tsk did work only for the last stack frame within a
  context. Now it considers the check for each stack frame like it
  should.

- both the oprofile variant and the perf_events variant did save a
  return address twice if a zero back chain was detected, which
  indicates an interrupt frame. The new dump_trace function will call
  the oprofile and perf_events backends with the psw address that is
  contained within the corresponding pt_regs structure instead.

- the original show_trace and save_context_stack functions did already
  use the psw address of the pt_regs structure if a zero back chain
  was detected. However now we ignore the psw address if it is a user
  space address. After all we trace the kernel stack and not the user
  space stack. This way we also get rid of the garbage user space
  address in case of warnings and / or panic call traces.

So this should make life easier since now there is only one stack
tracer left which we can break.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
上级 3c2c126a
......@@ -184,6 +184,10 @@ struct task_struct;
struct mm_struct;
struct seq_file;
typedef int (*dump_trace_func_t)(void *data, unsigned long address);
void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp);
void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
......
......@@ -19,28 +19,28 @@
#include <asm/ipl.h>
/*
* For show_trace we have tree different stack to consider:
* For dump_trace we have tree different stack to consider:
* - the panic stack which is used if the kernel stack has overflown
* - the asynchronous interrupt stack (cpu related)
* - the synchronous kernel stack (process related)
* The stack trace can start at any of the three stack and can potentially
* The stack trace can start at any of the three stacks and can potentially
* touch all of them. The order is: panic stack, async stack, sync stack.
*/
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
unsigned long low, unsigned long high)
{
struct stack_frame *sf;
struct pt_regs *regs;
unsigned long addr;
while (1) {
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
addr = sf->gprs[8];
printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
/* Follow the backchain. */
while (1) {
if (func(data, sf->gprs[8]))
return sp;
low = sp;
sp = sf->back_chain;
if (!sp)
......@@ -48,45 +48,58 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
addr = sf->gprs[8];
printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *) sp;
addr = regs->psw.addr;
printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
if (!user_mode(regs)) {
if (func(data, regs->psw.addr))
return sp;
}
low = sp;
sp = regs->gprs[15];
}
}
static void show_trace(struct task_struct *task, unsigned long *stack)
void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
unsigned long sp)
{
const unsigned long frame_size =
STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
unsigned long sp;
unsigned long frame_size;
sp = (unsigned long) stack;
if (!sp)
sp = task ? task->thread.ksp : current_stack_pointer();
printk("Call Trace:\n");
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
sp = __show_trace(sp,
sp = __dump_trace(func, data, sp,
S390_lowcore.panic_stack + frame_size - 4096,
S390_lowcore.panic_stack + frame_size);
#endif
sp = __show_trace(sp,
sp = __dump_trace(func, data, sp,
S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
S390_lowcore.async_stack + frame_size);
if (task)
__show_trace(sp, (unsigned long) task_stack_page(task),
(unsigned long) task_stack_page(task) + THREAD_SIZE);
__dump_trace(func, data, sp,
(unsigned long)task_stack_page(task),
(unsigned long)task_stack_page(task) + THREAD_SIZE);
else
__show_trace(sp, S390_lowcore.thread_info,
__dump_trace(func, data, sp,
S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);
/*
 * dump_trace() callback: print a single kernel code address of the
 * call trace via printk, resolved to a symbol by the %pSR format.
 * Always returns 0 so the trace continues to the next frame.
 */
static int show_address(void *data, unsigned long address)
{
	void *addr = (void *) address;

	printk("([<%016lx>] %pSR)\n", address, addr);
	return 0;
}
static void show_trace(struct task_struct *task, unsigned long sp)
{
if (!sp)
sp = task ? task->thread.ksp : current_stack_pointer();
printk("Call Trace:\n");
dump_trace(show_address, NULL, task, sp);
if (!task)
task = current;
debug_show_held_locks(task);
......@@ -112,7 +125,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
printk("%016lx ", *stack++);
}
printk("\n");
show_trace(task, sp);
show_trace(task, (unsigned long)sp);
}
static void show_last_breaking_event(struct pt_regs *regs)
......@@ -152,7 +165,7 @@ void show_regs(struct pt_regs *regs)
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
if (!user_mode(regs))
show_trace(NULL, (unsigned long *) regs->gprs[15]);
show_trace(NULL, regs->gprs[15]);
show_last_breaking_event(regs);
}
......
......@@ -222,64 +222,20 @@ static int __init service_level_perf_register(void)
}
arch_initcall(service_level_perf_register);
/* See also arch/s390/kernel/traps.c */
static unsigned long __store_trace(struct perf_callchain_entry *entry,
unsigned long sp,
unsigned long low, unsigned long high)
static int __perf_callchain_kernel(void *data, unsigned long address)
{
struct stack_frame *sf;
struct pt_regs *regs;
while (1) {
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
perf_callchain_store(entry, sf->gprs[8]);
/* Follow the backchain. */
while (1) {
low = sp;
sp = sf->back_chain;
if (!sp)
break;
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
perf_callchain_store(entry, sf->gprs[8]);
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *) sp;
perf_callchain_store(entry, sf->gprs[8]);
low = sp;
sp = regs->gprs[15];
}
struct perf_callchain_entry *entry = data;
perf_callchain_store(entry, address);
return 0;
}
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long head, frame_size;
struct stack_frame *head_sf;
if (user_mode(regs))
return;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
head = regs->gprs[15];
head_sf = (struct stack_frame *) head;
if (!head_sf || !head_sf->back_chain)
return;
head = head_sf->back_chain;
head = __store_trace(entry, head,
S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
S390_lowcore.async_stack + frame_size);
__store_trace(entry, head, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
}
/* Perf defintions for PMU event attributes in sysfs */
......
......@@ -10,69 +10,31 @@
#include <linux/kallsyms.h>
#include <linux/module.h>
static unsigned long save_context_stack(struct stack_trace *trace,
unsigned long sp,
unsigned long low,
unsigned long high,
int nosched)
static int __save_address(void *data, unsigned long address, int nosched)
{
struct stack_frame *sf;
struct pt_regs *regs;
unsigned long addr;
struct stack_trace *trace = data;
while(1) {
if (sp < low || sp > high)
return sp;
sf = (struct stack_frame *)sp;
while(1) {
addr = sf->gprs[8];
if (!trace->skip)
trace->entries[trace->nr_entries++] = addr;
else
if (nosched && in_sched_functions(address))
return 0;
if (trace->skip > 0) {
trace->skip--;
if (trace->nr_entries >= trace->max_entries)
return sp;
low = sp;
sp = sf->back_chain;
if (!sp)
break;
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *)sp;
return 0;
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long)(sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *)sp;
addr = regs->psw.addr;
if (!nosched || !in_sched_functions(addr)) {
if (!trace->skip)
trace->entries[trace->nr_entries++] = addr;
else
trace->skip--;
}
if (trace->nr_entries >= trace->max_entries)
return sp;
low = sp;
sp = regs->gprs[15];
if (trace->nr_entries < trace->max_entries) {
trace->entries[trace->nr_entries++] = address;
return 0;
}
return 1;
}
static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
static int save_address(void *data, unsigned long address)
{
unsigned long new_sp, frame_size;
return __save_address(data, address, 0);
}
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
new_sp = save_context_stack(trace, sp,
S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
S390_lowcore.panic_stack + frame_size, 0);
new_sp = save_context_stack(trace, new_sp,
S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
S390_lowcore.async_stack + frame_size, 0);
save_context_stack(trace, new_sp,
S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE, 0);
/*
 * dump_trace() callback used by save_stack_trace_tsk(): store the
 * address into the stack_trace buffer, but skip addresses that fall
 * inside scheduler functions (nosched variant).
 */
static int save_address_nosched(void *data, unsigned long address)
{
	const int nosched = 1;

	return __save_address(data, address, nosched);
}
void save_stack_trace(struct stack_trace *trace)
......@@ -80,7 +42,7 @@ void save_stack_trace(struct stack_trace *trace)
unsigned long sp;
sp = current_stack_pointer();
__save_stack_trace(trace, sp);
dump_trace(save_address, trace, NULL, sp);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
......@@ -88,14 +50,12 @@ EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned long sp, low, high;
unsigned long sp;
sp = tsk->thread.ksp;
if (tsk == current)
sp = current_stack_pointer();
low = (unsigned long) task_stack_page(tsk);
high = (unsigned long) task_pt_regs(tsk);
save_context_stack(trace, sp, low, high, 1);
dump_trace(save_address_nosched, trace, tsk, sp);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
......@@ -106,7 +66,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
unsigned long sp;
sp = kernel_stack_pointer(regs);
__save_stack_trace(trace, sp);
dump_trace(save_address, trace, NULL, sp);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
......
......@@ -6,5 +6,5 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
oprofile-y := $(DRIVER_OBJS) init.o
oprofile-y += hwsampler.o
/*
* S390 Version
* Copyright IBM Corp. 2005
* Author(s): Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
*/
#include <linux/oprofile.h>
#include <asm/processor.h> /* for struct stack_frame */
/*
 * Walk one kernel stack area [low, high] starting at frame pointer sp,
 * reporting up to *depth return addresses via oprofile_add_trace().
 * *depth is decremented for every address reported.
 *
 * Returns the stack pointer at which the walk left this area so the
 * caller can continue the trace on the next stack (async -> sync).
 */
static unsigned long
__show_trace(unsigned int *depth, unsigned long sp,
	     unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (*depth) {
		/* Frame must lie completely inside this stack area. */
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		(*depth)--;
		oprofile_add_trace(sf->gprs[8]);

		/* Follow the backchain. */
		while (*depth) {
			low = sp;
			sp = sf->back_chain;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			(*depth)--;
			oprofile_add_trace(sf->gprs[8]);
		}

		if (*depth == 0)
			break;

		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		(*depth)--;
		/*
		 * Report the interrupted context's psw address; sf->gprs[8]
		 * was already reported above, so reporting it again here
		 * would duplicate an entry in the trace.
		 */
		oprofile_add_trace(regs->psw.addr);
		low = sp;
		sp = regs->gprs[15];
	}
	return sp;
}
/*
 * oprofile backtrace entry point: record up to 'depth' kernel return
 * addresses starting from the interrupted context's stack pointer.
 * User mode samples carry no kernel stack and are ignored.
 */
void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	unsigned long head, frame_size;
	struct stack_frame* head_sf;

	if (user_mode(regs))
		return;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	head = regs->gprs[15];
	head_sf = (struct stack_frame*)head;

	/*
	 * Guard against a zero stack pointer before dereferencing; the
	 * perf_events variant performs the same check.
	 */
	if (!head_sf || !head_sf->back_chain)
		return;

	head = head_sf->back_chain;

	/* Trace the async (interrupt) stack first, then the task stack. */
	head = __show_trace(&depth, head,
			    S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			    S390_lowcore.async_stack + frame_size);

	__show_trace(&depth, head, S390_lowcore.thread_info,
		     S390_lowcore.thread_info + THREAD_SIZE);
}
......@@ -20,8 +20,6 @@
#include "../../../drivers/oprofile/oprof.h"
extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
#include "hwsampler.h"
#include "op_counter.h"
......@@ -494,6 +492,24 @@ static void oprofile_hwsampler_exit(void)
hwsampler_shutdown();
}
/*
 * dump_trace() callback for oprofile: record one return address and
 * count it against the remaining trace depth. Returns non-zero once
 * the depth budget is exhausted, which stops dump_trace().
 */
static int __s390_backtrace(void *data, unsigned long address)
{
	unsigned int *remaining = data;

	if (!*remaining)
		return 1;	/* depth exhausted: stop the walk */
	*remaining -= 1;
	oprofile_add_trace(address);
	return 0;
}
/*
 * oprofile backtrace entry point: walk the kernel stack of the
 * interrupted context via the common dump_trace() helper, recording
 * at most 'depth' addresses. User mode samples are ignored since
 * only the kernel stack is traced.
 */
static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
{
	if (!user_mode(regs))
		dump_trace(__s390_backtrace, &depth, NULL, regs->gprs[15]);
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = s390_backtrace;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册