diff --git a/kernel/softirq.c b/kernel/softirq.c
index 19668d614f4738d511df3289d650e55ca3bb7c8d..4196b9f84690066a75f6e67bdefbe4ff79eb0336 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -350,7 +350,8 @@ asmlinkage __visible void do_softirq(void)
  */
 void irq_enter_rcu(void)
 {
-	if (is_idle_task(current) && !in_interrupt()) {
+	if (tick_nohz_full_cpu(smp_processor_id()) ||
+	    (is_idle_task(current) && !in_interrupt())) {
 		/*
 		 * Prevent raise_softirq from needlessly waking up ksoftirqd
 		 * here, as softirq will be serviced on return from interrupt.
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 33750db5b56469e9cd770a0f0a4bfe75cdd7f747..aed5d6b6ca718f90a3509d97295e413ca5419a83 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1347,6 +1347,13 @@ static inline void tick_nohz_irq_enter(void)
 	now = ktime_get();
 	if (ts->idle_active)
 		tick_nohz_stop_idle(ts, now);
+	/*
+	 * If all CPUs are idle, we may need to update a stale jiffies value.
+	 * Note nohz_full is a special case: a timekeeper is guaranteed to stay
+	 * alive but it might be busy looping with interrupts disabled in some
+	 * rare case (typically stop machine). So we must make sure we have a
+	 * last resort.
+	 */
 	if (ts->tick_stopped)
 		tick_nohz_update_jiffies(now);
 }