From 75f93fed50c2abadbab6ef546b265f51ca975b27 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 27 Sep 2013 17:30:03 +0200
Subject: [PATCH] sched: Revert need_resched() to look at TIF_NEED_RESCHED

Yuanhan reported a serious throughput regression in his pigz benchmark.
Using the ftrace patch I found that several idle paths need more TLC
before we can switch the generic need_resched() over to
preempt_need_resched.

The preemption paths benefit most from preempt_need_resched and do
indeed use it; all other need_resched() users don't really care that
much so reverting need_resched() back to tif_need_resched() is the
simple and safe solution.

Reported-by: Yuanhan Liu
Signed-off-by: Peter Zijlstra
Cc: Fengguang Wu
Cc: Huang Ying
Cc: lkp@linux.intel.com
Cc: Linus Torvalds
Link: http://lkml.kernel.org/r/20130927153003.GF15690@laptop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/preempt.h | 8 --------
 include/asm-generic/preempt.h  | 8 --------
 include/linux/sched.h          | 5 +++++
 3 files changed, 5 insertions(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 1de41690ff99..8729723636fd 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -79,14 +79,6 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
 }
 
-/*
- * Returns true when we need to resched -- even if we can not.
- */
-static __always_inline bool need_resched(void)
-{
-	return unlikely(test_preempt_need_resched());
-}
-
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 5dc14ed3791c..ddf2b420ac8f 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -84,14 +84,6 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 	return !--*preempt_count_ptr();
 }
 
-/*
- * Returns true when we need to resched -- even if we can not.
- */
-static __always_inline bool need_resched(void)
-{
-	return unlikely(test_preempt_need_resched());
-}
-
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b09798b672f3..2ac5285db434 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2577,6 +2577,11 @@ static inline bool __must_check current_clr_polling_and_test(void)
 }
 #endif
 
+static __always_inline bool need_resched(void)
+{
+	return unlikely(tif_need_resched());
+}
+
 /*
  * Thread group CPU time accounting.
  */
-- 
GitLab
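
Note (not part of the patch): the change above moves the generic need_resched() back to the
TIF_NEED_RESCHED flag check while the preemption paths keep using the preempt_count-based
check. Below is a minimal userspace sketch, in plain C, of how those two checks can disagree;
the variables ti_flags and preempt_count and the constants used are simplified stand-ins for
the real per-task thread_info flags and per-CPU preempt count, and the inverted-bit detail is
an assumption modeled on the x86 decrement-and-test scheme visible in the hunk above. This is
illustrative only, not kernel code.

	/* build: cc -O2 -o resched_demo resched_demo.c */
	#include <stdbool.h>
	#include <stdio.h>

	#define TIF_NEED_RESCHED	3			/* bit index in the thread-info flags word */
	#define _TIF_NEED_RESCHED	(1UL << TIF_NEED_RESCHED)
	#define PREEMPT_NEED_RESCHED	0x80000000U		/* bit folded into preempt_count; kept inverted
								   so a single decrement-and-test-zero works */

	static unsigned long ti_flags;				/* stand-in for thread_info->flags */
	static unsigned int  preempt_count = PREEMPT_NEED_RESCHED; /* inverted bit set => no resched needed */

	/* The check this patch makes need_resched() use again: the TIF flag. */
	static inline bool tif_need_resched(void)
	{
		return ti_flags & _TIF_NEED_RESCHED;
	}

	/* The preempt_count-based check that the preemption paths keep using. */
	static inline bool test_preempt_need_resched(void)
	{
		return !(preempt_count & PREEMPT_NEED_RESCHED);	/* bit is stored inverted */
	}

	int main(void)
	{
		/* Mark the task as needing a reschedule via the TIF flag only,
		 * without folding it into preempt_count. */
		ti_flags |= _TIF_NEED_RESCHED;

		printf("tif_need_resched():          %d\n", tif_need_resched());	/* 1 */
		printf("test_preempt_need_resched(): %d\n", test_preempt_need_resched()); /* 0 */
		return 0;
	}

In this sketch only the TIF flag is set, so the TIF-based check fires while the
preempt_count-based one does not; the commit message's point is that paths which only care
about "should we reschedule?" are safer reading the TIF flag until the remaining idle paths
are fixed up to keep the folded preempt_count bit in sync.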