提交 1019a359 编写于 作者: P Peter Zijlstra 提交者: Ingo Molnar

sched/deadline: Fix stale yield state

When we fail to start the deadline timer in update_curr_dl(), we
forget to clear ->dl_yielded, resulting in wrecked time keeping.

Since the natural place to clear both ->dl_yielded and ->dl_throttled
is in replenish_dl_entity(); both are after all waiting for that event;
make it so.

Luckily since 67dfa1b7 ("sched/deadline: Implement
cancel_dl_timer() to use in switched_from_dl()") the
task_on_rq_queued() condition in dl_task_timer() must be true, and can
therefore call enqueue_task_dl() unconditionally.
Reported-by: Wanpeng Li <wanpeng.li@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kirill Tkhai <ktkhai@parallels.com>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1416962647-76792-4-git-send-email-wanpeng.li@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 a7bebf48
...@@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se, ...@@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime; dl_se->runtime = pi_se->dl_runtime;
} }
if (dl_se->dl_yielded)
dl_se->dl_yielded = 0;
if (dl_se->dl_throttled)
dl_se->dl_throttled = 0;
} }
/* /*
...@@ -536,9 +541,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) ...@@ -536,9 +541,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
sched_clock_tick(); sched_clock_tick();
update_rq_clock(rq); update_rq_clock(rq);
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
if (task_on_rq_queued(p)) {
enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
if (dl_task(rq->curr)) if (dl_task(rq->curr))
check_preempt_curr_dl(rq, p, 0); check_preempt_curr_dl(rq, p, 0);
...@@ -552,7 +554,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) ...@@ -552,7 +554,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
if (has_pushable_dl_tasks(rq)) if (has_pushable_dl_tasks(rq))
push_dl_task(rq); push_dl_task(rq);
#endif #endif
}
unlock: unlock:
raw_spin_unlock(&rq->lock); raw_spin_unlock(&rq->lock);
...@@ -613,10 +614,9 @@ static void update_curr_dl(struct rq *rq) ...@@ -613,10 +614,9 @@ static void update_curr_dl(struct rq *rq)
dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec; dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
if (dl_runtime_exceeded(rq, dl_se)) { if (dl_runtime_exceeded(rq, dl_se)) {
__dequeue_task_dl(rq, curr, 0);
if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
dl_se->dl_throttled = 1; dl_se->dl_throttled = 1;
else __dequeue_task_dl(rq, curr, 0);
if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
if (!is_leftmost(curr, &rq->dl)) if (!is_leftmost(curr, &rq->dl))
...@@ -853,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) ...@@ -853,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
* its rq, the bandwidth timer callback (which clearly has not * its rq, the bandwidth timer callback (which clearly has not
* run yet) will take care of this. * run yet) will take care of this.
*/ */
if (p->dl.dl_throttled) if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
return; return;
enqueue_dl_entity(&p->dl, pi_se, flags); enqueue_dl_entity(&p->dl, pi_se, flags);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册