Commit a8f22264 authored by David S. Miller

sparc64: Manage NMI watchdog enabling like x86.

Use a per-cpu 'wd_enabled' boolean and a global atomic_t count
of watchdog-NMI-enabled cpus, which is set to '-1' if something
is wrong with the watchdog and it can't be used.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 825c9fb4
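The scheme borrowed from x86 here has three parts: each cpu tracks its own watchdog state in a per-cpu flag (wd_enabled), consumers that only need a yes/no answer read one global counter (nmi_active), and a negative counter value permanently latches the watchdog as unusable. A minimal standalone C sketch of just that bookkeeping follows; it is not the kernel code itself: NCPUS, the explicit cpu argument, and the main() driver are invented for illustration (the kernel helpers take no argument and run on every cpu via on_each_cpu()).

#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4	/* illustrative; the kernel uses the real online cpu set */

static short wd_enabled[NCPUS];	/* models DEFINE_PER_CPU(short, wd_enabled) */
static atomic_int nmi_active;	/* >0 active, 0 disabled, <0 broken/unusable */

static void start_nmi_watchdog(int cpu)
{
	wd_enabled[cpu] = 1;
	atomic_fetch_add(&nmi_active, 1);
	/* the kernel version programs the perf counter here */
}

static void stop_nmi_watchdog(int cpu)
{
	wd_enabled[cpu] = 0;
	atomic_fetch_sub(&nmi_active, 1);
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		start_nmi_watchdog(cpu);

	/* a consumer like op_nmi_timer_init() checks only the global count */
	printf("usable: %d\n", atomic_load(&nmi_active) > 0);

	/* on setup failure every cpu is stopped and the count is latched
	 * to -1 so the watchdog can never be re-enabled */
	for (cpu = 0; cpu < NCPUS; cpu++)
		stop_nmi_watchdog(cpu);
	atomic_store(&nmi_active, -1);

	printf("usable: %d\n", atomic_load(&nmi_active) > 0);
	return 0;
}

The payoff of the split is that the hot NMI path (perfctr_irq) touches only its own cpu's flag, while slow paths such as oprofile setup and error handling need just a single atomic read.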
--- a/arch/sparc/include/asm/nmi.h
+++ b/arch/sparc/include/asm/nmi.h
@@ -5,6 +5,6 @@ extern int __init nmi_init(void);
 extern void perfctr_irq(int irq, struct pt_regs *regs);
 extern void nmi_adjust_hz(unsigned int new_hz);
 
-extern int nmi_usable;
+extern atomic_t nmi_active;
 
 #endif /* __NMI_H */
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -34,10 +34,17 @@
 static int nmi_watchdog_active;
 static int panic_on_timeout;
 
-int nmi_usable;
-EXPORT_SYMBOL_GPL(nmi_usable);
+/* nmi_active:
+ * >0: the NMI watchdog is active, but can be disabled
+ * <0: the NMI watchdog has not been set up, and cannot be enabled
+ *  0: the NMI watchdog is disabled, but can be enabled
+ */
+atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
+EXPORT_SYMBOL(nmi_active);
 
 static unsigned int nmi_hz = HZ;
+static DEFINE_PER_CPU(short, wd_enabled);
+static int endflag __initdata;
 
 static DEFINE_PER_CPU(unsigned int, last_irq_sum);
 static DEFINE_PER_CPU(local_t, alert_counter);
@@ -110,7 +117,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		__get_cpu_var(last_irq_sum) = sum;
 		local_set(&__get_cpu_var(alert_counter), 0);
 	}
-	if (nmi_usable) {
+	if (__get_cpu_var(wd_enabled)) {
 		write_pic(picl_value(nmi_hz));
 		pcr_ops->write(pcr_enable);
 	}
@@ -121,8 +128,6 @@ static inline unsigned int get_nmi_count(int cpu)
 	return cpu_data(cpu).__nmi_count;
 }
 
-static int endflag __initdata;
-
 static __init void nmi_cpu_busy(void *data)
 {
 	local_irq_enable_in_hardirq();
@@ -143,12 +148,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
 	printk(KERN_WARNING
 		"and attach the output of the 'dmesg' command.\n");
 
-	nmi_usable = 0;
+	per_cpu(wd_enabled, cpu) = 0;
+	atomic_dec(&nmi_active);
 }
 
-static void stop_watchdog(void *unused)
+static void stop_nmi_watchdog(void *unused)
 {
 	pcr_ops->write(PCR_PIC_PRIV);
+	__get_cpu_var(wd_enabled) = 0;
+	atomic_dec(&nmi_active);
 }
 
 static int __init check_nmi_watchdog(void)
@@ -156,6 +164,9 @@ static int __init check_nmi_watchdog(void)
 	unsigned int *prev_nmi_count;
 	int cpu, err;
 
+	if (!atomic_read(&nmi_active))
+		return 0;
+
 	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
 	if (!prev_nmi_count) {
 		err = -ENOMEM;
@@ -172,12 +183,15 @@ static int __init check_nmi_watchdog(void)
 	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
 
 	for_each_online_cpu(cpu) {
+		if (!per_cpu(wd_enabled, cpu))
+			continue;
 		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
 			report_broken_nmi(cpu, prev_nmi_count);
 	}
 	endflag = 1;
-	if (!nmi_usable) {
+	if (!atomic_read(&nmi_active)) {
 		kfree(prev_nmi_count);
+		atomic_set(&nmi_active, -1);
 		err = -ENODEV;
 		goto error;
 	}
@@ -188,12 +202,26 @@ static int __init check_nmi_watchdog(void)
 	kfree(prev_nmi_count);
 	return 0;
 error:
-	on_each_cpu(stop_watchdog, NULL, 1);
+	on_each_cpu(stop_nmi_watchdog, NULL, 1);
 	return err;
 }
 
-static void start_watchdog(void *unused)
+static void start_nmi_watchdog(void *unused)
+{
+	__get_cpu_var(wd_enabled) = 1;
+	atomic_inc(&nmi_active);
+
+	pcr_ops->write(PCR_PIC_PRIV);
+	write_pic(picl_value(nmi_hz));
+
+	pcr_ops->write(pcr_enable);
+}
+
+static void nmi_adjust_hz_one(void *unused)
 {
+	if (!__get_cpu_var(wd_enabled))
+		return;
+
 	pcr_ops->write(PCR_PIC_PRIV);
 	write_pic(picl_value(nmi_hz));
@@ -203,13 +231,13 @@ static void start_watchdog(void *unused)
 void nmi_adjust_hz(unsigned int new_hz)
 {
 	nmi_hz = new_hz;
-	on_each_cpu(start_watchdog, NULL, 1);
+	on_each_cpu(nmi_adjust_hz_one, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(nmi_adjust_hz);
 
 static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
 {
-	on_each_cpu(stop_watchdog, NULL, 1);
+	on_each_cpu(stop_nmi_watchdog, NULL, 1);
 	return 0;
 }
@@ -221,16 +249,14 @@ int __init nmi_init(void)
 {
 	int err;
 
-	nmi_usable = 1;
-	on_each_cpu(start_watchdog, NULL, 1);
+	on_each_cpu(start_nmi_watchdog, NULL, 1);
 
 	err = check_nmi_watchdog();
 	if (!err) {
 		err = register_reboot_notifier(&nmi_reboot_notifier);
 		if (err) {
-			nmi_usable = 0;
-			on_each_cpu(stop_watchdog, NULL, 1);
+			on_each_cpu(stop_nmi_watchdog, NULL, 1);
+			atomic_set(&nmi_active, -1);
 		}
 	}
 	return err;
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -57,7 +57,7 @@ static void timer_stop(void)
 
 static int op_nmi_timer_init(struct oprofile_operations *ops)
 {
-	if (!nmi_usable)
+	if (atomic_read(&nmi_active) <= 0)
 		return -ENODEV;
 
 	ops->start = timer_start;