Commit 196e4430 authored by Jingyi Wang, committed by Yang Yingliang

arm64: watchdog: add switch to select sdei_watchdog/pmu_watchdog

hulk inclusion
category: feature
bugzilla: NA
CVE: NA

-------------------------------------------------

On aarch64, both the SDEI_WATCHDOG and PMU_WATCHDOG code can now be
compiled in instead of having to choose one at build time.  SDEI_WATCHDOG
is used by default; if it is disabled by the kernel parameter
"disable_sdei_nmi_watchdog", PMU_WATCHDOG is used instead.
Signed-off-by: Jingyi Wang <wangjingyi11@huawei.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
Reviewed-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent eceac1fe
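The parsing of "disable_sdei_nmi_watchdog" itself is outside this diff; as a
minimal sketch of how such a boot flag is typically wired up with
early_param() (the handler name below is hypothetical, not taken from the
patch):

    #include <linux/init.h>

    /*
     * Sketch only: the real handler lives in the arm64 SDEI watchdog
     * code and is not shown in this diff; the function name here is
     * hypothetical.
     */
    static bool disable_sdei_nmi_watchdog;

    static int __init sdei_nmi_watchdog_off(char *str)
    {
    	disable_sdei_nmi_watchdog = true;
    	return 0;
    }
    early_param("disable_sdei_nmi_watchdog", sdei_nmi_watchdog_off);

Booting with disable_sdei_nmi_watchdog on the kernel command line then
leaves the default PMU (perf) ops table in place, as the diff below shows.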
@@ -24,7 +24,7 @@ static int sdei_watchdog_event_num;
 static bool disable_sdei_nmi_watchdog;
 static bool sdei_watchdog_registered;
 
-int watchdog_nmi_enable(unsigned int cpu)
+int watchdog_sdei_enable(unsigned int cpu)
 {
 	int ret;
@@ -47,7 +47,7 @@ int watchdog_nmi_enable(unsigned int cpu)
 	return 0;
 }
 
-void watchdog_nmi_disable(unsigned int cpu)
+void watchdog_sdei_disable(unsigned int cpu)
 {
 	int ret;
@@ -92,13 +92,10 @@ void sdei_watchdog_clear_eoi(void)
 	sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ);
 }
 
-int __init watchdog_nmi_probe(void)
+int __init watchdog_sdei_probe(void)
 {
 	int ret;
 
-	if (disable_sdei_nmi_watchdog)
-		return -EINVAL;
-
 	if (!is_hyp_mode_available()) {
 		pr_err("Disable SDEI NMI Watchdog in VM\n");
 		return -EINVAL;
@@ -135,3 +132,17 @@ int __init watchdog_nmi_probe(void)
 
 	return 0;
 }
+
+static struct watchdog_operations arch_watchdog_ops = {
+	.watchdog_nmi_stop = &watchdog_nmi_stop,
+	.watchdog_nmi_start = &watchdog_nmi_start,
+	.watchdog_nmi_probe = &watchdog_sdei_probe,
+	.watchdog_nmi_enable = &watchdog_sdei_enable,
+	.watchdog_nmi_disable = &watchdog_sdei_disable,
+};
+
+void watchdog_ops_init(void)
+{
+	if (!disable_sdei_nmi_watchdog)
+		nmi_watchdog_ops = arch_watchdog_ops;
+}
@@ -130,6 +130,17 @@ int watchdog_nmi_probe(void);
 int watchdog_nmi_enable(unsigned int cpu);
 void watchdog_nmi_disable(unsigned int cpu);
 
+struct watchdog_operations {
+	void (*watchdog_nmi_stop)(void);
+	void (*watchdog_nmi_start)(void);
+	int (*watchdog_nmi_probe)(void);
+	int (*watchdog_nmi_enable)(unsigned int cpu);
+	void (*watchdog_nmi_disable)(unsigned int cpu);
+};
+
+extern struct watchdog_operations nmi_watchdog_ops;
+void watchdog_ops_init(void);
+
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
  *
......
@@ -50,7 +50,16 @@ struct cpumask watchdog_allowed_mask __read_mostly;
 struct cpumask watchdog_cpumask __read_mostly;
 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 
+struct watchdog_operations nmi_watchdog_ops = {
+	.watchdog_nmi_stop = &watchdog_nmi_stop,
+	.watchdog_nmi_start = &watchdog_nmi_start,
+	.watchdog_nmi_probe = &watchdog_nmi_probe,
+	.watchdog_nmi_enable = &watchdog_nmi_enable,
+	.watchdog_nmi_disable = &watchdog_nmi_disable,
+};
+
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
 */
@@ -496,7 +505,7 @@ static void watchdog_enable(unsigned int cpu)
 	__touch_watchdog();
 
 	/* Enable the perf event */
 	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
-		watchdog_nmi_enable(cpu);
+		nmi_watchdog_ops.watchdog_nmi_enable(cpu);
 }
 
 static void watchdog_disable(unsigned int cpu)
@@ -510,7 +519,7 @@ static void watchdog_disable(unsigned int cpu)
 	 * between disabling the timer and disabling the perf event causes
 	 * the perf NMI to detect a false positive.
 	 */
-	watchdog_nmi_disable(cpu);
+	nmi_watchdog_ops.watchdog_nmi_disable(cpu);
 	hrtimer_cancel(hrtimer);
 	wait_for_completion(this_cpu_ptr(&softlockup_completion));
 }
@@ -566,7 +575,7 @@ int lockup_detector_offline_cpu(unsigned int cpu)
 static void lockup_detector_reconfigure(void)
 {
 	cpus_read_lock();
-	watchdog_nmi_stop();
+	nmi_watchdog_ops.watchdog_nmi_stop();
 
 	softlockup_stop_all();
 	set_sample_period();
@@ -574,7 +583,7 @@ static void lockup_detector_reconfigure(void)
 	if (watchdog_enabled && watchdog_thresh)
 		softlockup_start_all();
 
-	watchdog_nmi_start();
+	nmi_watchdog_ops.watchdog_nmi_start();
 	cpus_read_unlock();
 	/*
 	 * Must be called outside the cpus locked section to prevent
@@ -612,9 +621,9 @@ static __init void lockup_detector_setup(void)
 static void lockup_detector_reconfigure(void)
 {
 	cpus_read_lock();
-	watchdog_nmi_stop();
+	nmi_watchdog_ops.watchdog_nmi_stop();
 	lockup_detector_update_enable();
-	watchdog_nmi_start();
+	nmi_watchdog_ops.watchdog_nmi_start();
 	cpus_read_unlock();
 }
 
 static inline void lockup_detector_setup(void)
@@ -772,15 +781,21 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
 }
 #endif /* CONFIG_SYSCTL */
 
+void __weak watchdog_ops_init(void)
+{
+}
+
 void __init lockup_detector_init(void)
 {
+	watchdog_ops_init();
+
 	if (tick_nohz_full_enabled())
 		pr_info("Disabling watchdog on nohz_full cores by default\n");
 
 	cpumask_copy(&watchdog_cpumask,
 		     housekeeping_cpumask(HK_FLAG_TIMER));
 
-	if (!watchdog_nmi_probe())
+	if (!nmi_watchdog_ops.watchdog_nmi_probe())
 		nmi_watchdog_available = true;
 	lockup_detector_setup();
 }
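The dispatch pattern above — a default ops table in kernel/watchdog.c that a
strong arch-specific watchdog_ops_init() may overwrite at boot — can be seen
in isolation in the following userspace sketch. The names mirror the patch,
but this is an illustration, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    struct watchdog_operations {
    	int (*watchdog_nmi_probe)(void);
    };

    static int pmu_probe(void)  { puts("PMU watchdog probed");  return 0; }
    static int sdei_probe(void) { puts("SDEI watchdog probed"); return 0; }

    /* Default table, analogous to nmi_watchdog_ops in kernel/watchdog.c. */
    struct watchdog_operations nmi_watchdog_ops = {
    	.watchdog_nmi_probe = pmu_probe,
    };

    /* Stand-in for the "disable_sdei_nmi_watchdog" boot parameter. */
    static bool disable_sdei_nmi_watchdog;

    /* Analogous to the arm64 watchdog_ops_init() replacing the __weak stub. */
    static void watchdog_ops_init(void)
    {
    	if (!disable_sdei_nmi_watchdog)
    		nmi_watchdog_ops.watchdog_nmi_probe = sdei_probe;
    }

    int main(void)
    {
    	watchdog_ops_init();
    	/* Dispatch through the table, as lockup_detector_init() does. */
    	return nmi_watchdog_ops.watchdog_nmi_probe();
    }

As written this prints "SDEI watchdog probed"; with the flag set, the PMU
probe would run instead, which is the fallback the commit message describes.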
@@ -835,11 +835,8 @@ config HARDLOCKUP_DETECTOR_PERF
 	bool
 	select SOFTLOCKUP_DETECTOR
 
-choice
-	prompt "aarch64 NMI watchdog method"
+menu "ARM64 NMI watchdog configuration"
 	depends on ARM64
-	help
-	  Watchdog implementation method configuration.
 
 config SDEI_WATCHDOG
 	bool "SDEI NMI Watchdog support"
@@ -852,7 +849,7 @@ config PMU_WATCHDOG
 	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 	select HAVE_HARDLOCKUP_DETECTOR_PERF
 
-endchoice
+endmenu # "ARM64 NMI watchdog configuration"
 
 #
 # Enables a timestamp based low pass filter to compensate for perf based
......
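Note on the Kconfig change: a "choice" block makes SDEI_WATCHDOG and
PMU_WATCHDOG mutually exclusive at configuration time, while a plain "menu"
lets both options be enabled in the same build — which is exactly what the
runtime fallback above requires.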