提交 1af3ed3d 编写于 作者: P Peter Zijlstra 提交者: Ingo Molnar

sched: Unify load_balance{,_newidle}()

load_balance() and load_balance_newidle() look remarkably similar; one
key point in which they differ is the condition for when to do an active balance.

So split out that logic into a separate function.

One side effect is that previously load_balance_newidle() used to fail
and return -1 under these conditions, whereas now it doesn't. I've not
yet fully figured out the whole -1 return case for either
load_balance{,_newidle}().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 baa8c110
...@@ -2816,6 +2816,39 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, ...@@ -2816,6 +2816,39 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
/* Working cpumask for load_balance and load_balance_newidle. */ /* Working cpumask for load_balance and load_balance_newidle. */
static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask); static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
/*
 * Decide whether a failed load-balance pass should escalate to an
 * active balance (waking the migration thread to push the busiest
 * CPU's sole running task over to this CPU).
 *
 * Returns non-zero when an active balance should be attempted.
 */
static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
{
	if (idle == CPU_NEWLY_IDLE) {
		/*
		 * Newly-idle active balancing exists to serve the package
		 * power-saving logic: find_busiest_group() returns NULL when
		 * there is no imbalance, but with sched_mc={1,2} it may pick
		 * a group whose single running task can be pulled here so
		 * that the other package goes fully idle. Under a normal
		 * imbalance-driven pull the source runqueue holds more than
		 * one task, move_tasks() succeeds, and this path is never
		 * reached. So bail out unless the power-saving conditions
		 * actually apply.
		 */
		int shares_power = !sd_idle &&
				   (sd->flags & SD_SHARE_CPUPOWER) &&
				   !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE);

		if (shares_power ||
		    sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
			return 0;
	}

	/* Escalate only after repeated balance failures in this domain. */
	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2);
}
/* /*
* Check this_cpu to ensure it is balanced within domain. Attempt to move * Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance. * tasks if there is an imbalance.
...@@ -2902,8 +2935,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, ...@@ -2902,8 +2935,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
schedstat_inc(sd, lb_failed[idle]); schedstat_inc(sd, lb_failed[idle]);
sd->nr_balance_failed++; sd->nr_balance_failed++;
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { if (need_active_balance(sd, sd_idle, idle)) {
raw_spin_lock_irqsave(&busiest->lock, flags); raw_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the migration_thread, if the curr /* don't kick the migration_thread, if the curr
...@@ -3049,66 +3081,37 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) ...@@ -3049,66 +3081,37 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
int active_balance = 0; int active_balance = 0;
schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && sd->nr_balance_failed++;
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
return -1;
if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
return -1;
if (sd->nr_balance_failed++ < 2) if (need_active_balance(sd, sd_idle, CPU_NEWLY_IDLE)) {
return -1; double_lock_balance(this_rq, busiest);
/* /*
* The only task running in a non-idle cpu can be moved to this * don't kick the migration_thread, if the curr
* cpu in an attempt to completely freeup the other CPU * task on busiest cpu can't be moved to this_cpu
* package. The same method used to move task in load_balance() */
* have been extended for load_balance_newidle() to speedup if (!cpumask_test_cpu(this_cpu,
* consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2) &busiest->curr->cpus_allowed)) {
* double_unlock_balance(this_rq, busiest);
* The package power saving logic comes from all_pinned = 1;
* find_busiest_group(). If there are no imbalance, then return ld_moved;
* f_b_g() will return NULL. However when sched_mc={1,2} then }
* f_b_g() will select a group from which a running task may be
* pulled to this cpu in order to make the other package idle.
* If there is no opportunity to make a package idle and if
* there are no imbalance, then f_b_g() will return NULL and no
* action will be taken in load_balance_newidle().
*
* Under normal task pull operation due to imbalance, there
* will be more than one task in the source run queue and
* move_tasks() will succeed. ld_moved will be true and this
* active balance code will not be triggered.
*/
/* Lock busiest in correct order while this_rq is held */ if (!busiest->active_balance) {
double_lock_balance(this_rq, busiest); busiest->active_balance = 1;
busiest->push_cpu = this_cpu;
active_balance = 1;
}
/*
* don't kick the migration_thread, if the curr
* task on busiest cpu can't be moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
double_unlock_balance(this_rq, busiest); double_unlock_balance(this_rq, busiest);
all_pinned = 1; /*
return ld_moved; * Should not call ttwu while holding a rq->lock
} */
raw_spin_unlock(&this_rq->lock);
if (!busiest->active_balance) { if (active_balance)
busiest->active_balance = 1; wake_up_process(busiest->migration_thread);
busiest->push_cpu = this_cpu; raw_spin_lock(&this_rq->lock);
active_balance = 1;
} }
double_unlock_balance(this_rq, busiest);
/*
* Should not call ttwu while holding a rq->lock
*/
raw_spin_unlock(&this_rq->lock);
if (active_balance)
wake_up_process(busiest->migration_thread);
raw_spin_lock(&this_rq->lock);
} else } else
sd->nr_balance_failed = 0; sd->nr_balance_failed = 0;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册