提交 7e55668e 编写于 作者: J Jingyi Wang 提交者: Zheng Zengkai

arm64: watchdog: add switch to select sdei_watchdog/pmu_watchdog

hulk inclusion
category: feature
bugzilla: 49592
CVE: NA

-------------------------------------------------

On aarch64, we can compile both SDEI_WATCHDOG and PMU_WATCHDOG code
instead of choosing one. SDEI_WATCHDOG is used by default, and if
SDEI_WATCHDOG is disabled by kernel parameter "disable_sdei_nmi_watchdog",
PMU_WATCHDOG is used instead.
Signed-off-by: Jingyi Wang <wangjingyi11@huawei.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 d1f6beba
...@@ -25,7 +25,7 @@ static bool disable_sdei_nmi_watchdog; ...@@ -25,7 +25,7 @@ static bool disable_sdei_nmi_watchdog;
static bool sdei_watchdog_registered; static bool sdei_watchdog_registered;
static DEFINE_PER_CPU(ktime_t, last_check_time); static DEFINE_PER_CPU(ktime_t, last_check_time);
int watchdog_nmi_enable(unsigned int cpu) int watchdog_sdei_enable(unsigned int cpu)
{ {
int ret; int ret;
...@@ -49,7 +49,7 @@ int watchdog_nmi_enable(unsigned int cpu) ...@@ -49,7 +49,7 @@ int watchdog_nmi_enable(unsigned int cpu)
return 0; return 0;
} }
void watchdog_nmi_disable(unsigned int cpu) void watchdog_sdei_disable(unsigned int cpu)
{ {
int ret; int ret;
...@@ -111,13 +111,10 @@ void sdei_watchdog_clear_eoi(void) ...@@ -111,13 +111,10 @@ void sdei_watchdog_clear_eoi(void)
sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ); sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ);
} }
int __init watchdog_nmi_probe(void) int __init watchdog_sdei_probe(void)
{ {
int ret; int ret;
if (disable_sdei_nmi_watchdog)
return -EINVAL;
if (!is_hyp_mode_available()) { if (!is_hyp_mode_available()) {
pr_err("Disable SDEI NMI Watchdog in VM\n"); pr_err("Disable SDEI NMI Watchdog in VM\n");
return -EINVAL; return -EINVAL;
...@@ -154,3 +151,17 @@ int __init watchdog_nmi_probe(void) ...@@ -154,3 +151,17 @@ int __init watchdog_nmi_probe(void)
return 0; return 0;
} }
/*
 * SDEI-backed NMI watchdog operations for arm64.  stop/start reuse the
 * generic hooks; probe/enable/disable are the SDEI-specific versions.
 * Installed into nmi_watchdog_ops by watchdog_ops_init() below.
 */
static struct watchdog_operations arch_watchdog_ops = {
	.watchdog_nmi_stop	= watchdog_nmi_stop,
	.watchdog_nmi_start	= watchdog_nmi_start,
	.watchdog_nmi_probe	= watchdog_sdei_probe,
	.watchdog_nmi_enable	= watchdog_sdei_enable,
	.watchdog_nmi_disable	= watchdog_sdei_disable,
};
/*
 * Override the generic (weak) watchdog_ops_init(): unless the SDEI NMI
 * watchdog was disabled on the command line, switch the core lockup
 * detector over to the SDEI-backed operations table.
 */
void watchdog_ops_init(void)
{
	if (disable_sdei_nmi_watchdog)
		return;

	nmi_watchdog_ops = arch_watchdog_ops;
}
...@@ -131,6 +131,17 @@ int watchdog_nmi_probe(void); ...@@ -131,6 +131,17 @@ int watchdog_nmi_probe(void);
int watchdog_nmi_enable(unsigned int cpu); int watchdog_nmi_enable(unsigned int cpu);
void watchdog_nmi_disable(unsigned int cpu); void watchdog_nmi_disable(unsigned int cpu);
/*
 * Pluggable NMI watchdog backend.  The core lockup detector calls
 * through this table instead of the watchdog_nmi_*() functions
 * directly, so an architecture can substitute its own implementation
 * (e.g. the arm64 SDEI watchdog) at boot time.
 */
struct watchdog_operations {
/* Quiesce the NMI watchdog while the detector is reconfigured. */
void (*watchdog_nmi_stop)(void);
/* Resume the NMI watchdog after reconfiguration. */
void (*watchdog_nmi_start)(void);
/* One-shot probe at boot; returns 0 if the backend is usable. */
int (*watchdog_nmi_probe)(void);
/* Arm the watchdog on @cpu; returns 0 on success. */
int (*watchdog_nmi_enable)(unsigned int cpu);
/* Disarm the watchdog on @cpu. */
void (*watchdog_nmi_disable)(unsigned int cpu);
};

/* Active backend; defaults to the generic ops, may be replaced at init. */
extern struct watchdog_operations nmi_watchdog_ops;
/* Weak hook: architectures override it to install their own ops table. */
void watchdog_ops_init(void);
/** /**
* touch_nmi_watchdog - restart NMI watchdog timeout. * touch_nmi_watchdog - restart NMI watchdog timeout.
* *
......
...@@ -48,6 +48,14 @@ static int __read_mostly nmi_watchdog_available; ...@@ -48,6 +48,14 @@ static int __read_mostly nmi_watchdog_available;
struct cpumask watchdog_cpumask __read_mostly; struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
/*
 * Default NMI watchdog backend: the generic watchdog_nmi_*() hooks.
 * An architecture's watchdog_ops_init() may overwrite this table
 * before the lockup detector starts (see lockup_detector_init()).
 */
struct watchdog_operations nmi_watchdog_ops = {
	.watchdog_nmi_stop	= watchdog_nmi_stop,
	.watchdog_nmi_start	= watchdog_nmi_start,
	.watchdog_nmi_probe	= watchdog_nmi_probe,
	.watchdog_nmi_enable	= watchdog_nmi_enable,
	.watchdog_nmi_disable	= watchdog_nmi_disable,
};
#ifdef CONFIG_HARDLOCKUP_DETECTOR #ifdef CONFIG_HARDLOCKUP_DETECTOR
# ifdef CONFIG_SMP # ifdef CONFIG_SMP
...@@ -465,7 +473,7 @@ static void watchdog_enable(unsigned int cpu) ...@@ -465,7 +473,7 @@ static void watchdog_enable(unsigned int cpu)
__touch_watchdog(); __touch_watchdog();
/* Enable the perf event */ /* Enable the perf event */
if (watchdog_enabled & NMI_WATCHDOG_ENABLED) if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
watchdog_nmi_enable(cpu); nmi_watchdog_ops.watchdog_nmi_enable(cpu);
} }
static void watchdog_disable(unsigned int cpu) static void watchdog_disable(unsigned int cpu)
...@@ -479,7 +487,7 @@ static void watchdog_disable(unsigned int cpu) ...@@ -479,7 +487,7 @@ static void watchdog_disable(unsigned int cpu)
* between disabling the timer and disabling the perf event causes * between disabling the timer and disabling the perf event causes
* the perf NMI to detect a false positive. * the perf NMI to detect a false positive.
*/ */
watchdog_nmi_disable(cpu); nmi_watchdog_ops.watchdog_nmi_disable(cpu);
hrtimer_cancel(hrtimer); hrtimer_cancel(hrtimer);
wait_for_completion(this_cpu_ptr(&softlockup_completion)); wait_for_completion(this_cpu_ptr(&softlockup_completion));
} }
...@@ -535,7 +543,7 @@ int lockup_detector_offline_cpu(unsigned int cpu) ...@@ -535,7 +543,7 @@ int lockup_detector_offline_cpu(unsigned int cpu)
static void lockup_detector_reconfigure(void) static void lockup_detector_reconfigure(void)
{ {
cpus_read_lock(); cpus_read_lock();
watchdog_nmi_stop(); nmi_watchdog_ops.watchdog_nmi_stop();
softlockup_stop_all(); softlockup_stop_all();
set_sample_period(); set_sample_period();
...@@ -543,7 +551,7 @@ static void lockup_detector_reconfigure(void) ...@@ -543,7 +551,7 @@ static void lockup_detector_reconfigure(void)
if (watchdog_enabled && watchdog_thresh) if (watchdog_enabled && watchdog_thresh)
softlockup_start_all(); softlockup_start_all();
watchdog_nmi_start(); nmi_watchdog_ops.watchdog_nmi_start();
cpus_read_unlock(); cpus_read_unlock();
/* /*
* Must be called outside the cpus locked section to prevent * Must be called outside the cpus locked section to prevent
...@@ -581,9 +589,9 @@ static __init void lockup_detector_setup(void) ...@@ -581,9 +589,9 @@ static __init void lockup_detector_setup(void)
static void lockup_detector_reconfigure(void) static void lockup_detector_reconfigure(void)
{ {
cpus_read_lock(); cpus_read_lock();
watchdog_nmi_stop(); nmi_watchdog_ops.watchdog_nmi_stop();
lockup_detector_update_enable(); lockup_detector_update_enable();
watchdog_nmi_start(); nmi_watchdog_ops.watchdog_nmi_start();
cpus_read_unlock(); cpus_read_unlock();
} }
static inline void lockup_detector_setup(void) static inline void lockup_detector_setup(void)
...@@ -741,15 +749,21 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write, ...@@ -741,15 +749,21 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
} }
#endif /* CONFIG_SYSCTL */ #endif /* CONFIG_SYSCTL */
/*
 * Default no-op.  Declared __weak so an architecture can provide its
 * own definition that swaps nmi_watchdog_ops for an arch-specific
 * table (the arm64 SDEI watchdog does this).  Called once from
 * lockup_detector_init() before the first watchdog_nmi_probe().
 */
void __weak watchdog_ops_init(void)
{
}
void __init lockup_detector_init(void) void __init lockup_detector_init(void)
{ {
watchdog_ops_init();
if (tick_nohz_full_enabled()) if (tick_nohz_full_enabled())
pr_info("Disabling watchdog on nohz_full cores by default\n"); pr_info("Disabling watchdog on nohz_full cores by default\n");
cpumask_copy(&watchdog_cpumask, cpumask_copy(&watchdog_cpumask,
housekeeping_cpumask(HK_FLAG_TIMER)); housekeeping_cpumask(HK_FLAG_TIMER));
if (!watchdog_nmi_probe()) if (!nmi_watchdog_ops.watchdog_nmi_probe())
nmi_watchdog_available = true; nmi_watchdog_available = true;
lockup_detector_setup(); lockup_detector_setup();
} }
...@@ -964,11 +964,8 @@ config HARDLOCKUP_DETECTOR_PERF ...@@ -964,11 +964,8 @@ config HARDLOCKUP_DETECTOR_PERF
bool bool
select SOFTLOCKUP_DETECTOR select SOFTLOCKUP_DETECTOR
choice menu "ARM64 NMI watchdog configuration"
prompt "aarch64 NMI watchdog method"
depends on ARM64 depends on ARM64
help
Watchdog implementation method configuration.
config SDEI_WATCHDOG config SDEI_WATCHDOG
bool "SDEI NMI Watchdog support" bool "SDEI NMI Watchdog support"
...@@ -981,7 +978,7 @@ config PMU_WATCHDOG ...@@ -981,7 +978,7 @@ config PMU_WATCHDOG
depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_HARDLOCKUP_DETECTOR_PERF select HAVE_HARDLOCKUP_DETECTOR_PERF
endchoice endmenu # "ARM64 NMI watchdog configuration"
# #
# Enables a timestamp based low pass filter to compensate for perf based # Enables a timestamp based low pass filter to compensate for perf based
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册