Commit c3442697 authored by Paul E. McKenney

softirq: Eliminate unused cond_resched_softirq() macro

The cond_resched_softirq() macro is not used anywhere in mainline, so
this commit simplifies the kernel by eliminating it.
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
Parent cee43939
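
For context: cond_resched_softirq() let long-running code that holds bottom halves disabled briefly re-enable them and yield the CPU. Since nothing in mainline calls it, the helper goes away; a caller wanting that behaviour can open-code the same sequence. A minimal sketch, where more_work() and do_one_item() are hypothetical placeholders:

	local_bh_disable();
	while (more_work()) {
		do_one_item();
		if (need_resched()) {
			local_bh_enable();	/* re-enable BHs ... */
			cond_resched();		/* ... so it is safe to sleep */
			local_bh_disable();
		}
	}
	local_bh_enable();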
include/linux/sched.h
@@ -1613,7 +1613,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * explicit rescheduling in places that are safe. The return
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling,
- * cond_resched_softirq() will enable bhs before scheduling.
  */
 #ifndef CONFIG_PREEMPT
 extern int _cond_resched(void);
@@ -1633,13 +1632,6 @@ extern int __cond_resched_lock(spinlock_t *lock);
 	__cond_resched_lock(lock);				\
 })
 
-extern int __cond_resched_softirq(void);
-
-#define cond_resched_softirq() ({					\
-	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
-	__cond_resched_softirq();					\
-})
-
 static inline void cond_resched_rcu(void)
 {
 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
kernel/sched/core.c
@@ -5012,20 +5012,6 @@ int __cond_resched_lock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(__cond_resched_lock);
 
-int __sched __cond_resched_softirq(void)
-{
-	BUG_ON(!in_softirq());
-
-	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
-		local_bh_enable();
-		preempt_schedule_common();
-		local_bh_disable();
-		return 1;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(__cond_resched_softirq);
-
 /**
  * yield - yield the current processor to other threads.
  *
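Note the guard in the deleted helper: should_resched(SOFTIRQ_DISABLE_OFFSET) is true only when the preempt count consists of exactly one BH-disable level and a reschedule is pending, so nested softirq-disabled sections never rescheduled. An illustrative sketch of the nesting case (not from the patch):

	local_bh_disable();
	local_bh_disable();	/* nested: softirq count is now two levels */
	/*
	 * Here should_resched(SOFTIRQ_DISABLE_OFFSET) is false, so the old
	 * __cond_resched_softirq() would have returned 0 rather than enable
	 * BHs while an outer section still expects them disabled.
	 */
	local_bh_enable();
	local_bh_enable();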
kernel/softirq.c
@@ -145,8 +145,7 @@ static void __local_bh_enable(unsigned int cnt)
 }
 
 /*
- * Special-case - softirqs can safely be enabled in
- * cond_resched_softirq(), or by __do_softirq(),
+ * Special-case - softirqs can safely be enabled by __do_softirq(),
  * without processing still-pending softirqs:
  */
 void _local_bh_enable(void)
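For contrast, a rough sketch of the two enable flavours the comment distinguishes (illustrative, not part of the patch):

	local_bh_disable();
	/* ... */
	local_bh_enable();	/* when the count drops to zero, pending
				 * softirqs are processed via do_softirq() */

	/* inside __do_softirq(), which runs with interrupts disabled: */
	_local_bh_enable();	/* only drops the softirq count; pending
				 * softirqs stay pending because the caller
				 * is about to handle them itself */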