Commit a4e637b3 authored by Wei Li, committed by Zheng Zengkai

arm64: Add support for hard lockup by using pmu counter

hulk inclusion
category: feature
bugzilla: 49592
CVE: NA

-------------------------------------------------

This feature is based on the "arm64: perf: add nmi support for pmu" patch
series. It can be enabled by passing the kernel command-line parameter
"hardlockup_enable=on"; otherwise the perf NMI watchdog is disabled by
default.
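
For example, a boot entry enabling the detector could look like this (an illustrative command line; only "hardlockup_enable=on" is prescribed by this patch):

    console=ttyAMA0 root=/dev/vda2 hardlockup_enable=on

When the parameter is absent or set to "off", hw_nmi_get_sample_period() returns 0 and hardlockup_detector_event_create() bails out with -EINVAL, so no perf event is ever registered (see the last two hunks below).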
Signed-off-by: Wei Li <liwei391@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 43b33a48
@@ -240,6 +240,11 @@ static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
#define gicr_read_vpendbaser(c)	__gic_readq_nonatomic(c)

static inline bool gic_supports_nmi(void)
{
	return false;
}

static inline bool gic_prio_masking_enabled(void)
{
	return false;
......
@@ -172,6 +172,7 @@ config ARM64
select HAVE_NMI
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
@@ -1787,6 +1788,7 @@ config ARM64_MODULE_PLTS
config ARM64_PSEUDO_NMI
bool "Support for NMI-like interrupts"
select ARM_GIC_V3
select HAVE_PERF_EVENTS_NMI
help
Adds support for mimicking Non-Maskable Interrupts through the use of
GIC interrupt priority. This support requires version 3 or later of
......
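
Note: selecting ARM64_PSEUDO_NMI only builds the support in; gic_supports_nmi() additionally requires that pseudo-NMIs were enabled on the GIC at boot. In mainline this is done with the "irqchip.gicv3_pseudo_nmi=1" command-line parameter, which this tree presumably inherits, so a full boot configuration for the detector would combine both switches:

    irqchip.gicv3_pseudo_nmi=1 hardlockup_enable=on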
@@ -146,6 +146,14 @@ static inline u32 gic_read_rpr(void)
#define gicr_write_vpendbaser(v, c)	writeq_relaxed(v, c)
#define gicr_read_vpendbaser(c)		readq_relaxed(c)

extern struct static_key_false supports_pseudo_nmis;

static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}

static inline bool gic_prio_masking_enabled(void)
{
	return system_uses_irq_prio_masking();
......
@@ -35,6 +35,7 @@
#include <linux/console.h>
#include <linux/kvm_host.h>
#include <linux/perf/arm_pmu.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
@@ -1363,3 +1364,112 @@ bool cpus_are_stuck_in_kernel(void)
return !!cpus_stuck_in_kernel || smp_spin_tables;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
/* Runtime switch, parsed at early boot: the perf NMI watchdog stays off
 * unless hardlockup_enable=on (or =1) is given on the command line. */
s64 hardlockup_enable;
static DEFINE_PER_CPU(u64, cpu_freq_probed);

static int __init hardlockup_enable_setup(char *str)
{
	if (!strcasecmp(str, "on") || !strcasecmp(str, "1"))
		hardlockup_enable = 1;
	else if (!strcasecmp(str, "off") || !strcasecmp(str, "0"))
		hardlockup_enable = 0;

	return 1;
}
__setup("hardlockup_enable=", hardlockup_enable_setup);

static u64 arch_pmu_get_cycles(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	return armpmu->read_counter(event);
}

static u64 arch_probe_cpu_freq(void)
{
	volatile int i;
	u32 loop = 50000000;
	u64 cycles_a, cycles_b;
	u64 timer_a, timer_b;
	u32 timer_hz = arch_timer_get_cntfrq();
	struct perf_event *evt;
	struct perf_event_attr timer_attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(struct perf_event_attr),
		.pinned		= 1,
		.disabled	= 0,
		.sample_period	= 0xffffffffUL,
	};

	/* make sure the cycle counter is enabled */
	evt = perf_event_create_kernel_counter(&timer_attr, smp_processor_id(),
					       NULL, NULL, NULL);
	if (IS_ERR(evt))
		return 0;

	do {
		timer_b = timer_a;

		/* avoid dead loop here */
		if (loop)
			loop >>= 1;
		else
			break;

		/*
		 * Sample the generic timer and the PMU cycle counter
		 * around a busy-wait window; retry while the cycle
		 * counter has not advanced.
		 */
		timer_a = arch_timer_read_counter();
		cycles_a = arch_pmu_get_cycles(evt);
		for (i = 0; i < loop; i++)
			;
		timer_b = arch_timer_read_counter();
		cycles_b = arch_pmu_get_cycles(evt);
	} while (cycles_b <= cycles_a);

	perf_event_release_kernel(evt);

	if (unlikely(timer_b == timer_a))
		return 0;

	return timer_hz * (cycles_b - cycles_a) / (timer_b - timer_a);
}
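
/*
 * Editor's note: the probed frequency is the ratio of elapsed PMU
 * cycles to elapsed generic-timer ticks, scaled by the timer rate.
 * With illustrative numbers (not from this patch): a 50 MHz generic
 * timer, 200,000,000 elapsed cycles and 5,000,000 elapsed ticks give
 *
 *	cpu_freq = timer_hz * (cycles_b - cycles_a) / (timer_b - timer_a)
 *	         = 50,000,000 * 200,000,000 / 5,000,000
 *	         = 2,000,000,000 Hz (2 GHz)
 */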

static u64 arch_get_cpu_freq(void)
{
	u64 cpu_freq;
	unsigned int cpu = smp_processor_id();

	cpu_freq = per_cpu(cpu_freq_probed, cpu);
	/* probe on first use only; -1 caches a failed probe */
	if (!cpu_freq) {
		cpu_freq = arch_probe_cpu_freq();
		pr_info("NMI watchdog: CPU%u freq probed as %llu HZ.\n",
			smp_processor_id(), cpu_freq);

		if (!cpu_freq)
			cpu_freq = -1;

		per_cpu(cpu_freq_probed, cpu) = cpu_freq;
	}

	if (-1 == cpu_freq)
		cpu_freq = 0;

	return cpu_freq;
}

u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	u64 cpu_freq;

	if (!gic_supports_nmi())
		return 0;

	if (hardlockup_enable != 0) {
		cpu_freq = arch_get_cpu_freq();
		return cpu_freq * watchdog_thresh;
	}

	return 0;
}
#endif
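
hw_nmi_get_sample_period() converts the watchdog threshold (in seconds) into a perf sample period in CPU cycles: the cycle-counter event raises an NMI roughly every watchdog_thresh seconds, at which point the detector checks whether the normal timer interrupt has made progress. Continuing the illustrative 2 GHz example with the kernel's default watchdog_thresh of 10:

    sample_period = cpu_freq * watchdog_thresh
                  = 2,000,000,000 * 10
                  = 20,000,000,000 cycles (one NMI per ~10 s of CPU time)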
@@ -86,7 +86,7 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
* - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
* interrupt.
*/
-static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
/*
* Global static key controlling whether an update to PMR allowing more
@@ -375,12 +375,6 @@ static void gic_unmask_irq(struct irq_data *d)
gic_poke_irq(d, GICD_ISENABLER);
}
-static inline bool gic_supports_nmi(void)
-{
-	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
-	       static_branch_likely(&supports_pseudo_nmis);
-}
-
static int gic_irq_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which, bool val)
{
......
@@ -191,6 +191,8 @@ static int hardlockup_detector_event_create(void)
	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	if (!wd_attr->sample_period)
		return -EINVAL;

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
......