Commit ef04b4e6 authored by Daniel Thompson, committed by Xie XiuQi

arm64: Add support for on-demand backtrace of other CPUs

hulk inclusion
category: feature
bugzilla: 12268
CVE: NA
-------------------------------------------------

Currently arm64 has no implementation of arch_trigger_cpumask_backtrace.
This patch provides one, using the library code recently added by Russell
King for the majority of the implementation. Currently this is realized
using regular IRQs but could, in the future, be implemented using
NMI-like mechanisms.

Note: There is a small (and nasty) change to the generic code to ensure
      good stack traces. The generic code currently assumes that
      show_regs() will include a stack trace but arch/arm64 does not do
      this so we must add extra code here. Ideas on a better approach
      here would be very welcome (is there any appetite to change arm64
      show_regs() or should we just tease out the dump code into a
      callback?).
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Wei Li <liwei391@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 98931e61
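
For context (not part of the patch itself): once arch_trigger_cpumask_backtrace is defined, the generic helpers in include/linux/nmi.h stop being no-ops, so a detector such as the hung-task or RCU stall code can dump the other CPUs. A minimal sketch of such a caller is shown below; dump_other_cpus() is a made-up name, while trigger_allbutself_cpu_backtrace() is the existing generic helper that, with this patch applied, ends up in raise_nmi() via nmi_trigger_cpumask_backtrace().

#include <linux/nmi.h>
#include <linux/printk.h>

/* Illustrative caller only, not added by this patch. */
static void dump_other_cpus(void)
{
	/* Backtrace every online CPU except the calling one. */
	if (!trigger_allbutself_cpu_backtrace())
		pr_info("CPU backtrace not supported on this architecture\n");
}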
@@ -24,7 +24,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/sysreg.h>
 
-#define NR_IPI 7
+#define NR_IPI 8
 
 typedef struct {
 	unsigned int __softirq_pending;
......
@@ -13,5 +13,11 @@ static inline int nr_legacy_irqs(void)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+					   bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
 #endif /* !__ASSEMBLER__ */
 #endif
@@ -151,6 +151,8 @@ bool cpus_are_stuck_in_kernel(void);
 extern void crash_smp_send_stop(void);
 extern bool smp_crash_stop_failed(void);
 
+void ipi_set_nmi_prio(void __iomem *base, u8 prio);
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* ifndef __ASM_SMP_H */
@@ -35,6 +35,7 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/nmi.h>
 #include <linux/irqchip/arm-gic-v3.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
@@ -83,7 +84,8 @@ enum ipi_msg_type {
 	IPI_CPU_CRASH_STOP,
 	IPI_TIMER,
 	IPI_IRQ_WORK,
-	IPI_WAKEUP
+	IPI_WAKEUP,
+	IPI_CPU_BACKTRACE
 };
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -782,6 +784,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_IRQ_WORK, "IRQ work interrupts"),
 	S(IPI_WAKEUP, "CPU wake-up interrupts"),
+	S(IPI_CPU_BACKTRACE, "backtrace interrupts"),
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
@@ -941,6 +944,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		break;
 #endif
 
+	case IPI_CPU_BACKTRACE:
+		nmi_enter();
+		nmi_cpu_backtrace(regs);
+		nmi_exit();
+		break;
+
 	default:
 		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
 		break;
@@ -1062,3 +1071,39 @@ bool cpus_are_stuck_in_kernel(void)
 
 	return !!cpus_stuck_in_kernel || smp_spin_tables;
 }
+
+void ipi_set_nmi_prio(void __iomem *base, u8 prio)
+{
+	/*
+	 * Using writeb here may cause a hardware error on D05, so
+	 * avoid the problem by using writel.
+	 */
+	u32 offset = (IPI_CPU_BACKTRACE / 4) * 4;
+	u32 shift = (IPI_CPU_BACKTRACE % 4) * 8;
+	u32 prios = readl_relaxed(base + GICR_IPRIORITYR0 + offset);
+
+	/* clear the old priority */
+	prios &= ~(0xff << shift);
+	/* set the new priority */
+	prios |= (prio << shift);
+	writel_relaxed(prios, base + GICR_IPRIORITYR0 + offset);
+}
+
+static void raise_nmi(cpumask_t *mask)
+{
+	/*
+	 * Generate the backtrace directly if we are running in a
+	 * calling context that is not preemptible by the backtrace IPI.
+	 */
+	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+		nmi_cpu_backtrace(NULL);
+
+	smp_cross_call(mask, IPI_CPU_BACKTRACE);
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
+}
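
Aside (not part of the patch): ipi_set_nmi_prio() above does a 32-bit read-modify-write of GICR_IPRIORITYR because, per its comment, byte writes can trigger a hardware error on D05. The byte-lane arithmetic can be checked in isolation with the plain C sketch below; the register is modelled as a local variable and 0x20 is only an illustrative NMI priority value.

#include <stdint.h>
#include <stdio.h>

#define IPI_CPU_BACKTRACE 7	/* the new IPI number introduced above */

int main(void)
{
	uint32_t reg = 0xa0a0a0a0;	/* pretend current priorities */
	uint32_t offset = (IPI_CPU_BACKTRACE / 4) * 4;	/* 32-bit word holding IPI 7 */
	uint32_t shift = (IPI_CPU_BACKTRACE % 4) * 8;	/* byte lane inside that word */
	uint8_t prio = 0x20;	/* illustrative NMI priority */

	reg &= ~(0xffu << shift);		/* clear the old priority */
	reg |= (uint32_t)prio << shift;		/* install the new one */

	printf("offset=%u shift=%u reg=0x%08x\n",
	       (unsigned)offset, (unsigned)shift, (unsigned)reg);
	return 0;
}

For IPI_CPU_BACKTRACE = 7 this prints offset=4 shift=24 reg=0x20a0a0a0, i.e. only the top byte of the second priority word changes.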
@@ -468,6 +468,16 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
 {
 	int err;
 
+	if (unlikely(irqnr < 16)) {
+		gic_write_eoir(irqnr);
+		if (static_branch_likely(&supports_deactivate_key))
+			gic_write_dir(irqnr);
+#ifdef CONFIG_SMP
+		handle_IPI(irqnr, regs);
+#endif
+		return;
+	}
+
 	if (static_branch_likely(&supports_deactivate_key))
 		gic_write_eoir(irqnr);
 	/*
@@ -853,6 +863,9 @@ static void gic_cpu_init(void)
 	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
 
+	if (gic_supports_nmi())
+		ipi_set_nmi_prio(rbase, GICD_INT_NMI_PRI);
+
 	/* initialise system registers */
 	gic_cpu_sys_reg_init();
 }
@@ -1320,6 +1333,17 @@ static int __init gic_init_bases(void __iomem *dist_base,
 	gic_update_vlpi_properties();
 
+	/*
+	 * The NMI backtrace DFX needs to check for NMI support, so this
+	 * must be done before the NMI backtrace DFX is enabled.
+	 */
+	if (gic_prio_masking_enabled()) {
+		if (!gic_has_group0() || gic_dist_security_disabled())
+			gic_enable_nmi_support();
+		else
+			pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n");
+	}
+
 	gic_smp_init();
 	gic_dist_init();
 	gic_cpu_init();
@@ -1330,13 +1354,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
 		its_cpu_init();
 	}
 
-	if (gic_prio_masking_enabled()) {
-		if (!gic_has_group0() || gic_dist_security_disabled())
-			gic_enable_nmi_support();
-		else
-			pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n");
-	}
-
 	return 0;
 
 out_free:
......
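
A note on the moved block above (an inference, not stated in the commit message): gic_cpu_init() now calls ipi_set_nmi_prio() only when gic_supports_nmi() is true, and that check only passes after gic_enable_nmi_support() has run, which is presumably why the pseudo-NMI enablement is hoisted above gic_smp_init()/gic_cpu_init(). A minimal, kernel-free sketch of that ordering dependency, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

static bool nmi_supported;	/* stands in for the pseudo-NMI static key */

static void enable_nmi_support(void)
{
	nmi_supported = true;
}

static void cpu_init(void)
{
	/* Mirrors the gic_supports_nmi() check added to gic_cpu_init(). */
	if (nmi_supported)
		printf("backtrace IPI raised to NMI priority\n");
	else
		printf("backtrace IPI left at normal priority\n");
}

int main(void)
{
	/* New ordering from the patch: enable NMI support first, then per-CPU init. */
	enable_nmi_support();
	cpu_init();
	return 0;
}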