Commit 613afbf8 authored by Frederic Weisbecker, committed by Ingo Molnar

sched: Pull up the might_sleep() check into cond_resched()

might_sleep() is called late in cond_resched(), only after the
need_resched()/preempt-enabled/system-running tests have been
checked.

It's better to check for sleeping while atomic earlier, so the
check doesn't depend on environmental state that reduces the
chances of detecting a problem.
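
To make the ordering issue concrete, here is a simplified userspace
sketch; every name in it is a stand-in for illustration, not the real
kernel code:

#include <stdio.h>

/* Simplified userspace sketch; every name is a stand-in. */
static int resched_pending;	/* stands in for need_resched()      */
static int atomic_depth;	/* stands in for the preempt counter */

static void check_might_sleep(void)
{
	if (atomic_depth)
		fprintf(stderr, "BUG: sleeping function called from atomic context\n");
}

/* Old ordering: the debug check hides behind the reschedule test, so an
 * atomic caller is only flagged when a reschedule happens to be pending
 * at that moment. */
static int cond_resched_old(void)
{
	if (resched_pending) {
		check_might_sleep();	/* rarely reached */
		return 1;		/* would call schedule() here */
	}
	return 0;
}

/* New ordering: the check runs first, unconditionally, so the very
 * first call from atomic context is flagged. */
static int cond_resched_new(void)
{
	check_might_sleep();		/* always executed */
	if (resched_pending)
		return 1;		/* would call schedule() here */
	return 0;
}

int main(void)
{
	atomic_depth = 1;	/* simulate holding a spinlock */
	cond_resched_old();	/* silent: no reschedule pending */
	cond_resched_new();	/* warns immediately */
	return 0;
}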

Also define the cond_resched_*() helpers as macros, so that the
file:line reported in the sleeping-while-atomic warning displays
the real origin of the call and not sched.h.
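
As a standalone illustration of why a macro is needed here (plain
userspace C, not kernel code): __FILE__ and __LINE__ expand at a
macro's use site, whereas inside an inline function they expand where
the function body is written:

#include <stdio.h>

static inline void report_inline(void)
{
	printf("inline: %s:%d\n", __FILE__, __LINE__);	/* always this line */
}

#define report_macro() \
	printf("macro:  %s:%d\n", __FILE__, __LINE__)	/* the caller's line */

int main(void)
{
	report_inline();	/* reports the line inside report_inline() */
	report_macro();		/* reports this line */
	return 0;
}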

Changes in v2:

 - Call __might_sleep() directly instead of might_sleep() which
   may call cond_resched()

 - Turn cond_resched() into a macro so that the file:line
   pair reported refers to the caller of cond_resched() and
   not __cond_resched() itself.

Changes in v3:

 - Also propagate this __might_sleep() pull up to
   cond_resched_lock() and cond_resched_softirq()
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1247725694-6082-6-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 6f80bd98
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -32,6 +32,7 @@
 #include <linux/swap.h>
 #include <linux/bootmem.h>
 #include <linux/fs_struct.h>
+#include <linux/hardirq.h>
 #include "internal.h"

 int sysctl_vfs_cache_pressure __read_mostly = 100;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2286,17 +2286,26 @@ static inline int need_resched(void)
  */
 extern int _cond_resched(void);
-static inline int cond_resched(void)
-{
-	return _cond_resched();
-}
+#define cond_resched() ({			\
+	__might_sleep(__FILE__, __LINE__, 0);	\
+	_cond_resched();			\
+})

-extern int cond_resched_lock(spinlock_t * lock);
-extern int cond_resched_softirq(void);
-static inline int cond_resched_bkl(void)
-{
-	return _cond_resched();
-}
+extern int __cond_resched_lock(spinlock_t *lock);
+
+#define cond_resched_lock(lock) ({				\
+	__might_sleep(__FILE__, __LINE__, PREEMPT_OFFSET);	\
+	__cond_resched_lock(lock);				\
+})
+
+extern int __cond_resched_softirq(void);
+
+#define cond_resched_softirq() ({				\
+	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
+	__cond_resched_softirq();				\
+})
+
+#define cond_resched_bkl()	cond_resched()

 /*
  * Does a critical section need to be broken due to another
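
The new macros rely on the GNU C statement-expression extension,
({ ... }), whose value is the value of its last statement, so each
macro can still be used as an expression and yields the return value
of the underlying __cond_resched_*() call, just like the old inline
functions did. A minimal standalone illustration (userspace C,
hypothetical names):

#include <stdio.h>

/* square() is a hypothetical example: a GNU C statement expression
 * evaluates to its last statement, so the macro works wherever an
 * expression is expected, preserving return-value semantics. */
#define square(x) ({		\
	int _v = (x);		\
	_v * _v;		\
})

int main(void)
{
	if (square(3) == 9)	/* the macro is used as an expression */
		printf("square(3) = %d\n", square(3));
	return 0;
}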
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6610,8 +6610,6 @@ static inline int should_resched(void)

 static void __cond_resched(void)
 {
-	__might_sleep(__FILE__, __LINE__, 0);
-
 	add_preempt_count(PREEMPT_ACTIVE);
 	schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
@@ -6628,14 +6626,14 @@ int __sched _cond_resched(void)
 EXPORT_SYMBOL(_cond_resched);

 /*
- * cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  * call schedule, and on return reacquire the lock.
  *
  * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
-int cond_resched_lock(spinlock_t *lock)
+int __cond_resched_lock(spinlock_t *lock)
 {
 	int resched = should_resched();
 	int ret = 0;
@@ -6651,9 +6649,9 @@ int cond_resched_lock(spinlock_t *lock)
 	}

 	return ret;
 }
-EXPORT_SYMBOL(cond_resched_lock);
+EXPORT_SYMBOL(__cond_resched_lock);

-int __sched cond_resched_softirq(void)
+int __sched __cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());

@@ -6665,7 +6663,7 @@ int __sched cond_resched_softirq(void)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(cond_resched_softirq);
+EXPORT_SYMBOL(__cond_resched_softirq);

 /**
  * yield - yield the current processor to other threads.
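
For context, a hedged sketch of a typical caller (my_lock, nr_buckets
and scan_bucket() are hypothetical names, not from this patch): a long
scan under a spinlock calls cond_resched_lock() each iteration, and
with this change the __might_sleep() check now reports the caller's
file:line even when no reschedule is pending.

/* Illustrative fragment only; my_lock, nr_buckets and scan_bucket()
 * are hypothetical, and this is not compilable on its own. */
spin_lock(&my_lock);
for (i = 0; i < nr_buckets; i++) {
	scan_bucket(i);			/* some per-bucket work */
	/* If a reschedule is pending, drop my_lock, schedule, and
	 * reacquire it; the bucket index keeps the scan position
	 * valid across the unlock window. */
	cond_resched_lock(&my_lock);
}
spin_unlock(&my_lock);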