提交 03e0d461 编写于 作者: T Tejun Heo

watchdog: introduce touch_softlockup_watchdog_sched()

touch_softlockup_watchdog() is used to tell watchdog that scheduler
stall is expected.  One group of usage is from paths where the task
may not be able to yield for a long time such as performing slow PIO
to finicky device and coming out of suspend.  The other is to account
for scheduler and timer going idle.

For scheduler softlockup detection, there's no reason to distinguish
the two cases; however, workqueue lockup detector is planned and it
can use the same signals from the former group while the latter would
spuriously prevent detection.  This patch introduces a new function
touch_softlockup_watchdog_sched() and converts the latter group to call
it instead.  For now, it just calls touch_softlockup_watchdog() and
there's no functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
上级 fca839c0
...@@ -377,6 +377,7 @@ extern void scheduler_tick(void); ...@@ -377,6 +377,7 @@ extern void scheduler_tick(void);
extern void sched_show_task(struct task_struct *p); extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_LOCKUP_DETECTOR #ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void); extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void); extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void); extern void touch_all_softlockup_watchdogs(void);
...@@ -387,6 +388,9 @@ extern unsigned int softlockup_panic; ...@@ -387,6 +388,9 @@ extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic; extern unsigned int hardlockup_panic;
void lockup_detector_init(void); void lockup_detector_init(void);
#else #else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void) static inline void touch_softlockup_watchdog(void)
{ {
} }
......
...@@ -354,7 +354,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) ...@@ -354,7 +354,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
return; return;
sched_clock_tick(); sched_clock_tick();
touch_softlockup_watchdog(); touch_softlockup_watchdog_sched();
} }
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
......
...@@ -143,7 +143,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) ...@@ -143,7 +143,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
* when we go busy again does not account too much ticks. * when we go busy again does not account too much ticks.
*/ */
if (ts->tick_stopped) { if (ts->tick_stopped) {
touch_softlockup_watchdog(); touch_softlockup_watchdog_sched();
if (is_idle_task(current)) if (is_idle_task(current))
ts->idle_jiffies++; ts->idle_jiffies++;
} }
...@@ -430,7 +430,7 @@ static void tick_nohz_update_jiffies(ktime_t now) ...@@ -430,7 +430,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
tick_do_update_jiffies64(now); tick_do_update_jiffies64(now);
local_irq_restore(flags); local_irq_restore(flags);
touch_softlockup_watchdog(); touch_softlockup_watchdog_sched();
} }
/* /*
...@@ -701,7 +701,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) ...@@ -701,7 +701,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
update_cpu_load_nohz(); update_cpu_load_nohz();
calc_load_exit_idle(); calc_load_exit_idle();
touch_softlockup_watchdog(); touch_softlockup_watchdog_sched();
/* /*
* Cancel the scheduled timer and restore the tick * Cancel the scheduled timer and restore the tick
*/ */
......
...@@ -225,7 +225,15 @@ static void __touch_watchdog(void) ...@@ -225,7 +225,15 @@ static void __touch_watchdog(void)
__this_cpu_write(watchdog_touch_ts, get_timestamp()); __this_cpu_write(watchdog_touch_ts, get_timestamp());
} }
void touch_softlockup_watchdog(void) /**
* touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
*
* Call when the scheduler may have stalled for legitimate reasons
* preventing the watchdog task from executing - e.g. the scheduler
* entering idle state. This should only be used for scheduler events.
* Use touch_softlockup_watchdog() for everything else.
*/
void touch_softlockup_watchdog_sched(void)
{ {
/* /*
* Preemption can be enabled. It doesn't matter which CPU's timestamp * Preemption can be enabled. It doesn't matter which CPU's timestamp
...@@ -233,6 +241,11 @@ void touch_softlockup_watchdog(void) ...@@ -233,6 +241,11 @@ void touch_softlockup_watchdog(void)
*/ */
raw_cpu_write(watchdog_touch_ts, 0); raw_cpu_write(watchdog_touch_ts, 0);
} }
void touch_softlockup_watchdog(void)
{
touch_softlockup_watchdog_sched();
}
EXPORT_SYMBOL(touch_softlockup_watchdog); EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void) void touch_all_softlockup_watchdogs(void)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册