Commit e73dfe30 authored by Zhen Lei, committed by Paul E. McKenney

sched/debug: Try trigger_single_cpu_backtrace(cpu) in dump_cpu_task()

The trigger_single_cpu_backtrace() function attempts to send an NMI to the
target CPU, which usually provides much better stack traces than the
dump_cpu_task() function's approach of dumping that stack from some other
CPU.  So much so that most calls to dump_cpu_task() happen only after
a call to trigger_single_cpu_backtrace() has failed, and the remaining
callers really should attempt trigger_single_cpu_backtrace() first as well.

Therefore, move the trigger_single_cpu_backtrace() invocation into
dump_cpu_task().
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Parent d8f3f583
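
At each call site, the change collapses the usual two-step pattern into a
single call.  A minimal sketch of the calling pattern before and after
(illustrative only, not part of the commit):

	/* Before: each caller tried the NMI backtrace itself. */
	if (!trigger_single_cpu_backtrace(cpu))	/* false if no NMI could be sent */
		dump_cpu_task(cpu);		/* fall back to a remote stack dump */

	/* After: dump_cpu_task() makes the NMI attempt internally. */
	dump_cpu_task(cpu);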
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -368,7 +368,7 @@ static void rcu_dump_cpu_stacks(void)
 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
 				if (cpu_is_offline(cpu))
 					pr_err("Offline CPU %d blocking current GP.\n", cpu);
-				else if (!trigger_single_cpu_backtrace(cpu))
+				else
 					dump_cpu_task(cpu);
 			}
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -511,8 +511,7 @@ static void rcu_check_gp_kthread_starvation(void)
 				pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
 			} else {
 				pr_err("Stack dump where RCU GP kthread last ran:\n");
-				if (!trigger_single_cpu_backtrace(cpu))
-					dump_cpu_task(cpu);
+				dump_cpu_task(cpu);
 			}
 		}
 		wake_up_process(gpk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11183,6 +11183,9 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 
 void dump_cpu_task(int cpu)
 {
+	if (trigger_single_cpu_backtrace(cpu))
+		return;
+
 	pr_info("Task dump for CPU %d:\n", cpu);
 	sched_show_task(cpu_curr(cpu));
 }
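
For reference, the resulting dump_cpu_task() is shown below with
explanatory comments added (the comments are editorial, not part of the
commit).  trigger_single_cpu_backtrace() returns true once the backtrace
NMI has been triggered, so the pr_info()/sched_show_task() path now serves
only as a fallback, for example on architectures without NMI-backtrace
support:

void dump_cpu_task(int cpu)
{
	/* Preferred: NMI backtrace taken on the target CPU itself. */
	if (trigger_single_cpu_backtrace(cpu))
		return;

	/* Fallback: dump the target CPU's current task from this CPU. */
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}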
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -370,8 +370,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
 	if (cpu >= 0) {
 		if (static_branch_unlikely(&csdlock_debug_extended))
 			csd_lock_print_extended(csd, cpu);
-		if (!trigger_single_cpu_backtrace(cpu))
-			dump_cpu_task(cpu);
+		dump_cpu_task(cpu);
 		if (!cpu_cur_csd) {
 			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
 			arch_send_call_function_single_ipi(cpu);