Commit da0c1e65 authored by Kirill Tkhai, committed by Ingo Molnar

sched: Add wrapper for checking task_struct::on_rq

Implement task_on_rq_queued() and use it everywhere instead of
the open-coded ->on_rq check. No functional changes.

The only exception is check_for_tasks(), where the wrapper is not
used because that would require exporting task_on_rq_queued() in
global header files. The next patch in the series converts it as
well, so the helper does not have to be moved back and forth.
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1408528052.23412.87.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent f36c019c
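The conversion is mechanical: every open-coded test of task_struct::on_rq becomes a call to the new helper, and direct assignments of 1 become TASK_ON_RQ_QUEUED. The following minimal, self-contained sketch shows that before/after pattern outside the kernel; the stub struct task_struct and the main() driver are illustrative assumptions only, while the wrapper and the constant mirror the header hunk further down in this diff.

/*
 * Illustration only -- not kernel code. The stub struct and main()
 * are assumptions made for the sake of a compilable example; the
 * wrapper and TASK_ON_RQ_QUEUED mirror the header hunk in this diff.
 */
#include <stdio.h>

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1

struct task_struct {
	int on_rq;
};

/* The wrapper this patch introduces. */
static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

int main(void)
{
	struct task_struct p = { .on_rq = TASK_ON_RQ_QUEUED };

	if (p.on_rq)			/* old, open-coded check */
		printf("old check: queued\n");

	if (task_on_rq_queued(&p))	/* new, wrapped check */
		printf("new check: queued\n");

	return 0;
}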
@@ -1043,7 +1043,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
+	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
@@ -1088,7 +1088,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		struct rq *src_rq, *dst_rq;
 		src_rq = task_rq(p);
@@ -1214,7 +1214,7 @@ static int migration_cpu_stop(void *data);
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
-	int running, on_rq;
+	int running, queued;
 	unsigned long ncsw;
 	struct rq *rq;
@@ -1252,7 +1252,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		rq = task_rq_lock(p, &flags);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
-		on_rq = p->on_rq;
+		queued = task_on_rq_queued(p);
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1284,7 +1284,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		 * running right now), it's preempted, and we should
 		 * yield - it could be a while.
 		 */
-		if (unlikely(on_rq)) {
+		if (unlikely(queued)) {
 			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 			set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1478,7 +1478,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
 	activate_task(rq, p, en_flags);
-	p->on_rq = 1;
+	p->on_rq = TASK_ON_RQ_QUEUED;
 	/* if a worker is waking up, notify workqueue */
 	if (p->flags & PF_WQ_WORKER)
@@ -1537,7 +1537,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 	int ret = 0;
 	rq = __task_rq_lock(p);
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
 		ttwu_do_wakeup(rq, p, wake_flags);
@@ -1678,7 +1678,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
-	if (p->on_rq && ttwu_remote(p, wake_flags))
+	if (task_on_rq_queued(p) && ttwu_remote(p, wake_flags))
 		goto stat;
 #ifdef CONFIG_SMP
@@ -1742,7 +1742,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!(p->state & TASK_NORMAL))
 		goto out;
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 	ttwu_do_wakeup(rq, p, 0);
@@ -2095,7 +2095,7 @@ void wake_up_new_task(struct task_struct *p)
 	init_task_runnable_average(p);
 	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
-	p->on_rq = 1;
+	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p, true);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2444,7 +2444,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 	 * project cycles that may never be accounted to this
 	 * thread, breaking clock_gettime().
 	 */
-	if (task_current(rq, p) && p->on_rq) {
+	if (task_current(rq, p) && task_on_rq_queued(p)) {
 		update_rq_clock(rq);
 		ns = rq_clock_task(rq) - p->se.exec_start;
 		if ((s64)ns < 0)
@@ -2490,7 +2490,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
 	 * been accounted, so we're correct here as well.
 	 */
-	if (!p->on_cpu || !p->on_rq)
+	if (!p->on_cpu || !task_on_rq_queued(p))
 		return p->se.sum_exec_runtime;
 #endif
@@ -2794,7 +2794,7 @@ static void __sched __schedule(void)
 		switch_count = &prev->nvcsw;
 	}
-	if (prev->on_rq || rq->skip_clock_update < 0)
+	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
 		update_rq_clock(rq);
 	next = pick_next_task(rq, prev);
@@ -2959,7 +2959,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	int oldprio, on_rq, running, enqueue_flag = 0;
+	int oldprio, queued, running, enqueue_flag = 0;
 	struct rq *rq;
 	const struct sched_class *prev_class;
@@ -2988,9 +2988,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -3030,7 +3030,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, p, enqueue_flag);
 	check_class_changed(rq, p, prev_class, oldprio);
@@ -3041,7 +3041,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 void set_user_nice(struct task_struct *p, long nice)
 {
-	int old_prio, delta, on_rq;
+	int old_prio, delta, queued;
 	unsigned long flags;
 	struct rq *rq;
@@ -3062,8 +3062,8 @@ void set_user_nice(struct task_struct *p, long nice)
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	on_rq = p->on_rq;
-	if (on_rq)
+	queued = task_on_rq_queued(p);
+	if (queued)
 		dequeue_task(rq, p, 0);
 	p->static_prio = NICE_TO_PRIO(nice);
@@ -3072,7 +3072,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	p->prio = effective_prio(p);
 	delta = p->prio - old_prio;
-	if (on_rq) {
+	if (queued) {
 		enqueue_task(rq, p, 0);
 		/*
 		 * If the task increased its priority or is running and
@@ -3344,7 +3344,7 @@ static int __sched_setscheduler(struct task_struct *p,
 {
 	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
		      MAX_RT_PRIO - 1 - attr->sched_priority;
-	int retval, oldprio, oldpolicy = -1, on_rq, running;
+	int retval, oldprio, oldpolicy = -1, queued, running;
 	int policy = attr->sched_policy;
 	unsigned long flags;
 	const struct sched_class *prev_class;
@@ -3541,9 +3541,9 @@ static int __sched_setscheduler(struct task_struct *p,
 		return 0;
 	}
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -3553,7 +3553,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (queued) {
 		/*
 		 * We enqueue to tail when the priority of a task is
 		 * increased (user space view).
@@ -4568,7 +4568,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	rcu_read_unlock();
 	rq->curr = rq->idle = idle;
-	idle->on_rq = 1;
+	idle->on_rq = TASK_ON_RQ_QUEUED;
 #if defined(CONFIG_SMP)
 	idle->on_cpu = 1;
 #endif
@@ -4645,7 +4645,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &flags);
@@ -4695,7 +4695,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
 		enqueue_task(rq_dest, p, 0);
@@ -4736,13 +4736,13 @@ void sched_setnuma(struct task_struct *p, int nid)
 {
 	struct rq *rq;
 	unsigned long flags;
-	bool on_rq, running;
+	bool queued, running;
 	rq = task_rq_lock(p, &flags);
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -4751,7 +4751,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, p, 0);
 	task_rq_unlock(rq, p, &flags);
 }
@@ -7116,13 +7116,13 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		.sched_policy = SCHED_NORMAL,
 	};
 	int old_prio = p->prio;
-	int on_rq;
-	on_rq = p->on_rq;
-	if (on_rq)
+	int queued;
+	queued = task_on_rq_queued(p);
+	if (queued)
 		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, &attr);
-	if (on_rq) {
+	if (queued) {
 		enqueue_task(rq, p, 0);
 		resched_curr(rq);
 	}
@@ -7309,16 +7309,16 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
 	struct task_group *tg;
-	int on_rq, running;
+	int queued, running;
 	unsigned long flags;
 	struct rq *rq;
 	rq = task_rq_lock(tsk, &flags);
 	running = task_current(rq, tsk);
-	on_rq = tsk->on_rq;
-	if (on_rq)
+	queued = task_on_rq_queued(tsk);
+	if (queued)
 		dequeue_task(rq, tsk, 0);
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
@@ -7331,14 +7331,14 @@ void sched_move_task(struct task_struct *tsk)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_move_group)
-		tsk->sched_class->task_move_group(tsk, on_rq);
+		tsk->sched_class->task_move_group(tsk, queued);
 	else
 #endif
 		set_task_rq(tsk, task_cpu(tsk));
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, tsk, 0);
 	task_rq_unlock(rq, tsk, &flags);
......
@@ -530,7 +530,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	update_rq_clock(rq);
 	dl_se->dl_throttled = 0;
 	dl_se->dl_yielded = 0;
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 		if (task_has_dl_policy(rq->curr))
 			check_preempt_curr_dl(rq, p, 0);
@@ -1030,7 +1030,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 		 * means a stop task can slip in, in which case we need to
 		 * re-start task selection.
 		 */
-		if (rq->stop && rq->stop->on_rq)
+		if (rq->stop && task_on_rq_queued(rq->stop))
 			return RETRY_TASK;
 	}
@@ -1257,7 +1257,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 		if (unlikely(task_rq(task) != rq ||
			     !cpumask_test_cpu(later_rq->cpu,
					       &task->cpus_allowed) ||
-			     task_running(rq, task) || !task->on_rq)) {
+			     task_running(rq, task) ||
+			     !task_on_rq_queued(task))) {
 			double_unlock_balance(rq, later_rq);
 			later_rq = NULL;
 			break;
@@ -1296,7 +1297,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->nr_cpus_allowed <= 1);
-	BUG_ON(!p->on_rq);
+	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!dl_task(p));
 	return p;
@@ -1443,7 +1444,7 @@ static int pull_dl_task(struct rq *this_rq)
 		if (p && dl_time_before(p->dl.deadline,
					this_rq->dl.earliest_dl.curr)) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->on_rq);
+			WARN_ON(!task_on_rq_queued(p));
 			/*
 			 * Then we pull iff p has actually an earlier
@@ -1596,7 +1597,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 	if (unlikely(p->dl.dl_throttled))
 		return;
-	if (p->on_rq && rq->curr != p) {
+	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
 			/* Only reschedule if pushing failed */
@@ -1614,7 +1615,7 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
 {
-	if (p->on_rq || rq->curr == p) {
+	if (task_on_rq_queued(p) || rq->curr == p) {
 #ifdef CONFIG_SMP
 		/*
 		 * This might be too much, but unfortunately
......
@@ -7494,7 +7494,7 @@ static void task_fork_fair(struct task_struct *p)
 static void
 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		return;
 	/*
@@ -7519,11 +7519,11 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 	 * switched back to the fair class the enqueue_entity(.flags=0) will
 	 * do the right thing.
 	 *
-	 * If it's on_rq, then the dequeue_entity(.flags=0) will already
-	 * have normalized the vruntime, if it's !on_rq, then only when
+	 * If it's queued, then the dequeue_entity(.flags=0) will already
+	 * have normalized the vruntime, if it's !queued, then only when
 	 * the task is sleeping will it still have non-normalized vruntime.
 	 */
-	if (!p->on_rq && p->state != TASK_RUNNING) {
+	if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
 		/*
 		 * Fix up our vruntime so that the current sleep doesn't
 		 * cause 'unlimited' sleep bonus.
@@ -7558,7 +7558,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 	 */
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		return;
 	/*
@@ -7604,7 +7604,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void task_move_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int queued)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq;
@@ -7623,7 +7623,7 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
 	 * fair sleeper stuff for the first placement, but who cares.
 	 */
 	/*
-	 * When !on_rq, vruntime of the task has usually NOT been normalized.
+	 * When !queued, vruntime of the task has usually NOT been normalized.
 	 * But there are some cases where it has already been normalized:
 	 *
 	 * - Moving a forked child which is waiting for being woken up by
@@ -7634,14 +7634,14 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
 	 * To prevent boost or penalty in the new cfs_rq caused by delta
 	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
 	 */
-	if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
-		on_rq = 1;
+	if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
+		queued = 1;
-	if (!on_rq)
+	if (!queued)
 		se->vruntime -= cfs_rq_of(se)->min_vruntime;
 	set_task_rq(p, task_cpu(p));
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
-	if (!on_rq) {
+	if (!queued) {
 		cfs_rq = cfs_rq_of(se);
 		se->vruntime += cfs_rq->min_vruntime;
 #ifdef CONFIG_SMP
......
@@ -1448,7 +1448,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 		 * means a dl or stop task can slip in, in which case we need
 		 * to re-start task selection.
 		 */
-		if (unlikely((rq->stop && rq->stop->on_rq) ||
+		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
			     rq->dl.dl_nr_running))
 			return RETRY_TASK;
 	}
@@ -1624,7 +1624,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
			     !cpumask_test_cpu(lowest_rq->cpu,
					       tsk_cpus_allowed(task)) ||
			     task_running(rq, task) ||
-			     !task->on_rq)) {
+			     !task_on_rq_queued(task))) {
 			double_unlock_balance(rq, lowest_rq);
 			lowest_rq = NULL;
@@ -1658,7 +1658,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->nr_cpus_allowed <= 1);
-	BUG_ON(!p->on_rq);
+	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!rt_task(p));
 	return p;
@@ -1809,7 +1809,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->on_rq);
+			WARN_ON(!task_on_rq_queued(p));
 			/*
 			 * There's a chance that p is higher in priority
@@ -1870,7 +1870,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	BUG_ON(!rt_task(p));
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		return;
 	weight = cpumask_weight(new_mask);
@@ -1936,7 +1936,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (!p->on_rq || rq->rt.rt_nr_running)
+	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
 		return;
 	if (pull_rt_task(rq))
@@ -1970,7 +1970,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (p->on_rq && rq->curr != p) {
+	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
		    /* Don't resched if we changed runqueues */
@@ -1989,7 +1989,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		return;
 	if (rq->curr == p) {
......
@@ -15,6 +15,9 @@
 struct rq;
+/* task_struct::on_rq states: */
+#define TASK_ON_RQ_QUEUED	1
 extern __read_mostly int scheduler_running;
 extern unsigned long calc_load_update;
@@ -942,6 +945,10 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #endif
 }
+static inline int task_on_rq_queued(struct task_struct *p)
+{
+	return p->on_rq == TASK_ON_RQ_QUEUED;
+}
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next)	do { } while (0)
......
@@ -28,7 +28,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev)
 {
 	struct task_struct *stop = rq->stop;
-	if (!stop || !stop->on_rq)
+	if (!stop || !task_on_rq_queued(stop))
 		return NULL;
 	put_prev_task(rq, prev);
......