Commit 17bc14b7 authored by Linus Torvalds

Revert "sched: Update_cfs_shares at period edge"

This reverts commit f269ae04.

It turns out it causes a very noticeable interactivity regression with
CONFIG_SCHED_AUTOGROUP (test-case: "make -j32" of the kernel in a
terminal window, while scrolling in a browser - the autogrouping means
that the two end up in separate cgroups, and the browser should be
smooth as silk despite the high load).

Says Paul Turner:
 "It seems that the update-throttling on the wake-side is reducing the
  interactive tasks' ability to preempt.  While I suspect the right
  longer term answer here is force these updates only in the
  cross-cgroup case; this is less trivial.  For this release I believe
  the right answer is either going to be a revert or restore the updates
  on the enqueue-side."
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Bisected-by: Mike Galbraith <efault@gmx.de>
Acked-by: Paul Turner <pjt@google.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 7313264b
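For context on Paul Turner's point: update_cfs_shares() re-weights a group's scheduling entity according to the runnable load inside that group, so deferring the update to the period edge means a previously idle cgroup whose task just woke up keeps running with a stale, near-minimum weight until the next periodic update, which blunts wakeup preemption. The toy program below only illustrates that effect; it is not kernel code, and the weight formula, the MIN_SHARES-like floor of 2, and all numbers are simplifying assumptions. The hunks below restore the real update_cfs_shares() calls on the enqueue/dequeue paths.

#include <stdio.h>

#define TG_SHARES   1024UL   /* the cgroup's configured share */
#define TASK_WEIGHT 1024UL   /* weight of one runnable task */

/* Rough stand-in for how a group entity's weight scales with the
 * runnable load inside the group relative to the whole group's load. */
static unsigned long group_entity_weight(unsigned long group_load,
                                         unsigned long total_load)
{
        unsigned long w = TG_SHARES * group_load / (total_load ? total_load : 1);
        return w ? w : 2;   /* never drop to zero, akin to a MIN_SHARES floor */
}

int main(void)
{
        unsigned long make_load    = 32 * TASK_WEIGHT;  /* "make -j32" cgroup */
        unsigned long browser_load = 0;                 /* browser cgroup, idle */

        /* Weight computed before the wakeup: what a deferred, period-edge
         * update would still be using at wakeup time. */
        unsigned long stale = group_entity_weight(browser_load,
                                                  browser_load + make_load);

        /* The browser task wakes up and is enqueued... */
        browser_load += TASK_WEIGHT;

        /* ...and with the enqueue-side update (restored by this revert)
         * the group entity is re-weighted immediately. */
        unsigned long fresh = group_entity_weight(browser_load,
                                                  browser_load + make_load);

        printf("group weight seen by the wakeup-preemption check:\n");
        printf("  stale (deferred update) : %lu\n", stale);  /* prints 2  */
        printf("  fresh (enqueue update)  : %lu\n", fresh);  /* prints 31 */
        return 0;
}

With the stale weight the woken group is far too light to preempt the busy cgroup; re-weighting it on the enqueue side gives it its fair share right away, which is why the revert restores interactivity under CONFIG_SCHED_AUTOGROUP.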
kernel/sched/fair.c

@@ -1265,7 +1265,6 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 	}

 	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
-	update_cfs_shares(cfs_rq);
 }

 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
@@ -1475,8 +1474,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	account_entity_enqueue(cfs_rq, se);
 	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
+	account_entity_enqueue(cfs_rq, se);
+	update_cfs_shares(cfs_rq);

 	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
@@ -1549,6 +1549,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
+	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);

 	update_stats_dequeue(cfs_rq, se);
 	if (flags & DEQUEUE_SLEEP) {
@@ -1568,8 +1569,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
+	se->on_rq = 0;
 	account_entity_dequeue(cfs_rq, se);
-	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);

 	/*
 	 * Normalize the entity after updating the min_vruntime because the
@@ -1583,7 +1584,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	return_cfs_rq_runtime(cfs_rq);

 	update_min_vruntime(cfs_rq);
-	se->on_rq = 0;
+	update_cfs_shares(cfs_rq);
 }

 /*
@@ -2595,8 +2596,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;

+		update_cfs_shares(cfs_rq);
 		update_entity_load_avg(se, 1);
-		update_cfs_rq_blocked_load(cfs_rq, 0);
 	}

 	if (!se) {
@@ -2656,8 +2657,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;

+		update_cfs_shares(cfs_rq);
 		update_entity_load_avg(se, 1);
-		update_cfs_rq_blocked_load(cfs_rq, 0);
 	}

 	if (!se) {
@@ -5837,11 +5838,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		se = tg->se[i];
 		/* Propagate contribution to hierarchy */
 		raw_spin_lock_irqsave(&rq->lock, flags);
-		for_each_sched_entity(se) {
+		for_each_sched_entity(se)
 			update_cfs_shares(group_cfs_rq(se));
-			/* update contribution to parent */
-			update_entity_load_avg(se, 1);
-		}
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}