提交 e9c84311 编写于 作者: Peter Zijlstra 提交者: Ingo Molnar

sched: Add TASK_WAKING

We're going to want to drop rq->lock in try_to_wake_up() for a
longer period of time, however we also want to deal with concurrent
waking of the same task, which is currently handled by holding
rq->lock.

So introduce a new TASK state, namely TASK_WAKING, which indicates
someone is already waking the task (other wakers will fail p->state
& state).

We also keep preemption disabled over the whole ttwu().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 5f3edc1b
...@@ -190,6 +190,7 @@ extern unsigned long long time_sync_thresh; ...@@ -190,6 +190,7 @@ extern unsigned long long time_sync_thresh;
/* in tsk->state again */ /* in tsk->state again */
#define TASK_DEAD 64 #define TASK_DEAD 64
#define TASK_WAKEKILL 128 #define TASK_WAKEKILL 128
#define TASK_WAKING 256
/* Convenience macros for the sake of set_task_state */ /* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
......
...@@ -2310,7 +2310,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) ...@@ -2310,7 +2310,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
{ {
int cpu, orig_cpu, this_cpu, success = 0; int cpu, orig_cpu, this_cpu, success = 0;
unsigned long flags; unsigned long flags;
long old_state;
struct rq *rq; struct rq *rq;
if (!sched_feat(SYNC_WAKEUPS)) if (!sched_feat(SYNC_WAKEUPS))
...@@ -2332,11 +2331,12 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) ...@@ -2332,11 +2331,12 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
} }
#endif #endif
this_cpu = get_cpu();
smp_wmb(); smp_wmb();
rq = task_rq_lock(p, &flags); rq = task_rq_lock(p, &flags);
update_rq_clock(rq); update_rq_clock(rq);
old_state = p->state; if (!(p->state & state))
if (!(old_state & state))
goto out; goto out;
if (p->se.on_rq) if (p->se.on_rq)
...@@ -2344,27 +2344,25 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) ...@@ -2344,27 +2344,25 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
cpu = task_cpu(p); cpu = task_cpu(p);
orig_cpu = cpu; orig_cpu = cpu;
this_cpu = smp_processor_id();
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (unlikely(task_running(rq, p))) if (unlikely(task_running(rq, p)))
goto out_activate; goto out_activate;
/*
* In order to handle concurrent wakeups and release the rq->lock
* we put the task in TASK_WAKING state.
*/
p->state = TASK_WAKING;
task_rq_unlock(rq, &flags);
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync); cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync);
if (cpu != orig_cpu) { if (cpu != orig_cpu)
set_task_cpu(p, cpu); set_task_cpu(p, cpu);
task_rq_unlock(rq, &flags);
/* might preempt at this point */
rq = task_rq_lock(p, &flags);
old_state = p->state;
if (!(old_state & state))
goto out;
if (p->se.on_rq)
goto out_running;
this_cpu = smp_processor_id(); rq = task_rq_lock(p, &flags);
cpu = task_cpu(p); WARN_ON(p->state != TASK_WAKING);
} cpu = task_cpu(p);
#ifdef CONFIG_SCHEDSTATS #ifdef CONFIG_SCHEDSTATS
schedstat_inc(rq, ttwu_count); schedstat_inc(rq, ttwu_count);
...@@ -2422,6 +2420,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) ...@@ -2422,6 +2420,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
#endif #endif
out: out:
task_rq_unlock(rq, &flags); task_rq_unlock(rq, &flags);
put_cpu();
return success; return success;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册