diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 448dfd0b2ea6f8f2cd9c21205756b6f576235977..b2676a16cfbe2448f39d6ee975f96785a1bb581c 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -126,8 +126,7 @@
  * Check whether we were atomic before we did preempt_disable():
  * (used by the scheduler)
  */
-#define in_atomic_preempt_off() \
-	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
 
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d6989f85c641da84830c978a08b3af5fae4fa869..ca260cc5d881d5c70595be16814eb1d867fdae07 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7486,7 +7486,7 @@ void __init sched_init(void)
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
-	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
+	int nested = preempt_count() + rcu_preempt_depth();
 
 	return (nested == preempt_offset);
 }
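
Note: both hunks drop the masking of PREEMPT_ACTIVE, which as of this series no longer lives in the preempt count, so the debug checks reduce to plain comparisons against the expected offset. Below is a minimal userspace sketch of the invariant the simplified in_atomic_preempt_off() now tests; the PREEMPT_DISABLE_OFFSET value, the fake_preempt_count variable, and the main() harness are illustrative stand-ins, not the kernel's real definitions.

/*
 * Userspace sketch (not kernel code): with no PREEMPT_ACTIVE bit folded
 * into the count, "was the caller already atomic before its own
 * preempt_disable()?" is just "is the count anything other than a
 * single disable offset?".
 */
#include <stdio.h>

#define PREEMPT_DISABLE_OFFSET 1   /* stand-in for the kernel constant */

static int fake_preempt_count;     /* stand-in for preempt_count() */

static int in_atomic_preempt_off(void)
{
	/* Post-patch form: no bits to mask out before comparing. */
	return fake_preempt_count != PREEMPT_DISABLE_OFFSET;
}

int main(void)
{
	fake_preempt_count = 1;  /* exactly one preempt_disable() in effect */
	printf("count=1 -> was-atomic=%d (expect 0)\n", in_atomic_preempt_off());

	fake_preempt_count = 2;  /* nested disable: caller was already atomic */
	printf("count=2 -> was-atomic=%d (expect 1)\n", in_atomic_preempt_off());
	return 0;
}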