Commit bdd4e85d authored by Frederic Weisbecker

sched: Isolate preempt counting in its own config option

Create a new CONFIG_PREEMPT_COUNT that handles the inc/dec
of the preempt count offset independently, so that the offset
can be updated by preempt_disable() and preempt_enable()
even without CONFIG_PREEMPT being set.

This prepares for making CONFIG_DEBUG_SPINLOCK_SLEEP work
with !CONFIG_PREEMPT, where it currently fails to detect
code that sleeps inside explicit preemption-disabled
sections.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Parent 2da8c8bc
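
To make the motivation concrete: below is a hedged sketch, invented for illustration (the function name and all), of the bug class that CONFIG_DEBUG_SPINLOCK_SLEEP can only catch once preempt_count() is really maintained on !CONFIG_PREEMPT kernels.

#include <linux/preempt.h>
#include <linux/slab.h>

/*
 * Hypothetical example, not from this commit: sleeping inside an
 * explicit preemption-disabled section. With CONFIG_PREEMPT=n and no
 * preempt counting, preempt_count() stayed 0 here, so the allocator's
 * might_sleep() check had nothing to trip on. With CONFIG_PREEMPT_COUNT=y
 * the count really reads 1 and the debug check can warn.
 */
static void *buggy_alloc(size_t size)
{
	void *p;

	preempt_disable();		/* inc_preempt_count() */
	p = kmalloc(size, GFP_KERNEL);	/* may sleep: now detectable */
	preempt_enable();		/* dec_preempt_count() */

	return p;
}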
@@ -88,7 +88,7 @@ static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
 {
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	return test_bit(bitnum, addr);
-#elif defined CONFIG_PREEMPT
+#elif defined CONFIG_PREEMPT_COUNT
 	return preempt_count();
 #else
 	return 1;
......
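Context for the hunk above (include/linux/bit_spinlock.h): on a !SMP, !DEBUG_SPINLOCK build there is no actual lock bit; bit_spin_lock() reduces to preempt_disable(). "Is the lock held?" therefore degrades to "is preemption disabled?", which is only answerable when the count is maintained, i.e. under the new CONFIG_PREEMPT_COUNT. A hedged paraphrase of the UP-only path (simplified and renamed to avoid clashing with the real functions):

#include <linux/preempt.h>

/* Simplified sketch of the uniprocessor fast path, not the real header. */
static inline void up_bit_spin_lock(int bitnum, unsigned long *addr)
{
	preempt_disable();	/* the lock's only observable effect on UP */
}

static inline int up_bit_spin_is_locked(int bitnum, unsigned long *addr)
{
	/* Only meaningful when CONFIG_PREEMPT_COUNT maintains the count. */
	return preempt_count();
}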
@@ -93,7 +93,7 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)

-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPT_COUNT)
 # define PREEMPT_CHECK_OFFSET 1
 #else
 # define PREEMPT_CHECK_OFFSET 0
@@ -115,7 +115,7 @@
 #define in_atomic_preempt_off() \
 		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
......
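PREEMPT_CHECK_OFFSET is the preempt_count() value expected inside a single preempt_disable() section: 1 when the count is maintained, 0 when the whole machinery compiles away. Tying it to CONFIG_PREEMPT_COUNT keeps in_atomic_preempt_off() honest in both builds. A hedged demo (invented function name):

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>

static void check_offset_demo(void)
{
	preempt_disable();
	/*
	 * CONFIG_PREEMPT_COUNT=y: preempt_count() == 1 == PREEMPT_CHECK_OFFSET
	 * CONFIG_PREEMPT_COUNT=n: preempt_count() == 0 == PREEMPT_CHECK_OFFSET
	 * Either way the check below stays quiet, as it should.
	 */
	WARN_ON(in_atomic_preempt_off());
	preempt_enable();
}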
@@ -134,7 +134,7 @@ static inline int page_cache_get_speculative(struct page *page)
 	VM_BUG_ON(in_interrupt());

 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT
+# ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
 	/*
@@ -172,7 +172,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 	VM_BUG_ON(in_interrupt());

 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT
+# ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
 	VM_BUG_ON(page_count(page) == 0);
......
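The VM_BUG_ON(!in_atomic()) above guards the lockless pagecache protocol: on !SMP + TREE_RCU, rcu_read_lock() maps to preempt_disable(), so a correct caller must appear atomic — but only a maintained count can show that. A hedged caller sketch (the function name and the 2011-era page_tree field are assumptions for illustration):

#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/* Hypothetical lookup in the style of find_get_page(), for illustration. */
static struct page *lookup_page_speculative(struct address_space *mapping,
					    pgoff_t index)
{
	struct page *page;

	rcu_read_lock();		/* bumps preempt_count() on UP TREE_RCU */
	page = radix_tree_lookup(&mapping->page_tree, index);
	if (page && !page_cache_get_speculative(page))
		page = NULL;		/* lost a race with page freeing */
	rcu_read_unlock();

	return page;
}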
@@ -27,6 +27,21 @@

 asmlinkage void preempt_schedule(void);

+#define preempt_check_resched() \
+do { \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
+} while (0)
+
+#else /* !CONFIG_PREEMPT */
+
+#define preempt_check_resched()		do { } while (0)
+
+#endif /* CONFIG_PREEMPT */
+
+#ifdef CONFIG_PREEMPT_COUNT
+
 #define preempt_disable() \
 do { \
 	inc_preempt_count(); \
@@ -39,12 +54,6 @@ do { \
 	dec_preempt_count(); \
 } while (0)

-#define preempt_check_resched() \
-do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
-} while (0)
-
 #define preempt_enable() \
 do { \
 	preempt_enable_no_resched(); \
@@ -80,18 +89,17 @@ do { \
 	preempt_check_resched(); \
 } while (0)

-#else
+#else /* !CONFIG_PREEMPT_COUNT */

 #define preempt_disable()		do { } while (0)
 #define preempt_enable_no_resched()	do { } while (0)
 #define preempt_enable()		do { } while (0)
-#define preempt_check_resched()		do { } while (0)

 #define preempt_disable_notrace()		do { } while (0)
 #define preempt_enable_no_resched_notrace()	do { } while (0)
 #define preempt_enable_notrace()		do { } while (0)

-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPT_COUNT */

 #ifdef CONFIG_PREEMPT_NOTIFIERS
......
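After this restructuring, CONFIG_PREEMPT only governs rescheduling (preempt_check_resched()), while CONFIG_PREEMPT_COUNT governs the counting. Since PREEMPT selects PREEMPT_COUNT, three combinations remain; a hedged sketch (invented function name):

#include <linux/preempt.h>

static void preempt_config_demo(void)
{
	preempt_disable();
#if defined(CONFIG_PREEMPT)
	/* count maintained; preempt_enable() may call preempt_schedule() */
#elif defined(CONFIG_PREEMPT_COUNT)
	/* count maintained for debug checks only; no involuntary preemption */
#else
	/* both calls compile to do { } while (0); the count is constant 0 */
#endif
	preempt_enable();
}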
@@ -239,7 +239,7 @@ extern int rcu_read_lock_bh_held(void);
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
  */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
 	int lockdep_opinion = 0;
@@ -250,12 +250,12 @@ static inline int rcu_read_lock_sched_held(void)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
 }
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
 	return 1;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

@@ -276,17 +276,17 @@ static inline int rcu_read_lock_bh_held(void)
 	return 1;
 }

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
 	return preempt_count() != 0 || irqs_disabled();
 }
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
 	return 1;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
......
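rcu_read_lock_sched_held() infers "inside an RCU-sched read-side section" from preempt_count() != 0 || irqs_disabled(). That inference is sound exactly when the count is maintained, hence the switch; with CONFIG_PREEMPT_COUNT=n it must conservatively return 1. A hedged usage sketch (invented function name):

#include <linux/kernel.h>
#include <linux/rcupdate.h>

static void sched_rcu_demo(void)
{
	rcu_read_lock_sched();	/* disables preemption */
	/* Actually verifiable on PREEMPT_COUNT kernels; trivially 1 otherwise. */
	WARN_ON_ONCE(!rcu_read_lock_sched_held());
	rcu_read_unlock_sched();
}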
@@ -2502,7 +2502,7 @@ extern int _cond_resched(void);

 extern int __cond_resched_lock(spinlock_t *lock);

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
 #else
 #define PREEMPT_LOCK_OFFSET	0
......
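PREEMPT_LOCK_OFFSET is what preempt_count() should read while the caller still holds exactly one spinlock: PREEMPT_OFFSET (1) with counting, 0 without. __cond_resched_lock() compares against it before dropping the lock to reschedule. A hedged call-site sketch (invented names):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Hypothetical long walk under a lock, yielding periodically. */
static void walk_many(spinlock_t *lock, struct list_head *head)
{
	struct list_head *pos;

	spin_lock(lock);
	list_for_each(pos, head) {
		/* ... process pos ... */
		cond_resched_lock(lock);	/* may drop, resched, retake */
	}
	spin_unlock(lock);
}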
@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY

 config PREEMPT
 	bool "Preemptible Kernel (Low-Latency Desktop)"
+	select PREEMPT_COUNT
 	help
 	  This option reduces the latency of the kernel by making
 	  all kernel code (that is not executing in a critical section)
@@ -52,3 +53,5 @@ config PREEMPT

 endchoice
+
+config PREEMPT_COUNT
+       bool
\ No newline at end of file
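PREEMPT_COUNT has no prompt string, so it is invisible in menuconfig and can only be enabled via select, as CONFIG_PREEMPT now does. The stated goal is to let debug options do the same; a hedged sketch of what the follow-up to lib/Kconfig.debug might look like (DEBUG_SPINLOCK_SLEEP is a real option there; the select line is this series' intent, not part of this commit):

config DEBUG_SPINLOCK_SLEEP
	bool "Spinlock debugging: sleep-inside-spinlock checking"
	select PREEMPT_COUNT
	depends on DEBUG_KERNEL
	help
	  If you say Y here, various routines which may sleep will become
	  very noisy if they are called inside atomic sections.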
@@ -2843,7 +2843,7 @@ void sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP)
 	p->on_cpu = 0;
 #endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
 #endif
......