Commit bd939f45, authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Propagate 'struct lb_env' usage into find_busiest_group

More function argument passing reduction.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-v66ivjfqdiqdso01lqgqx6qf@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 0ce90475
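The pattern this commit applies is simple: per-call arguments (this_cpu, idle, the imbalance out-pointer) move into the single struct lb_env that load_balance() already builds, so each helper down to find_busiest_group() takes one context pointer instead of four or five loose parameters, and the computed imbalance is stored in env->imbalance rather than returned through an unsigned long *imbalance. A minimal, self-contained C sketch of that idea (not kernel code; the struct and function names below are hypothetical stand-ins):

#include <stdio.h>

/* Hypothetical stand-in for 'struct lb_env': one context struct that
 * carries what used to be separate arguments and out-pointers. */
struct env_like {
        int  dst_cpu;           /* was: int this_cpu argument */
        int  idle;              /* was: enum cpu_idle_type idle argument */
        long imbalance;         /* was: unsigned long *imbalance out-pointer */
};

/* Before: helper(sd, this_cpu, idle, &imbalance); result via pointer.
 * After:  helper(env); inputs are read from, and the result written
 * into, the one environment struct. */
static void compute_imbalance(struct env_like *env, long max_load, long avg_load)
{
        env->imbalance = (max_load > avg_load) ? max_load - avg_load : 0;
}

int main(void)
{
        struct env_like env = { .dst_cpu = 0, .idle = 0, .imbalance = 0 };

        compute_imbalance(&env, 300, 250);
        printf("imbalance = %ld\n", env.imbalance);  /* prints: imbalance = 50 */
        return 0;
}

The diff below applies exactly this transformation to the load-balancing helpers.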
@@ -3082,7 +3082,7 @@ struct lb_env {
 	struct rq		*dst_rq;
 
 	enum cpu_idle_type	idle;
-	long			load_move;
+	long			imbalance;
 	unsigned int		flags;
 
 	unsigned int		loop;
@@ -3218,7 +3218,7 @@ static unsigned long task_h_load(struct task_struct *p);
 static const unsigned int sched_nr_migrate_break = 32;
 
 /*
- * move_tasks tries to move up to load_move weighted load from busiest to
+ * move_tasks tries to move up to imbalance weighted load from busiest to
  * this_rq, as part of a balancing operation within domain "sd".
  * Returns 1 if successful and 0 otherwise.
  *
@@ -3231,7 +3231,7 @@ static int move_tasks(struct lb_env *env)
 	unsigned long load;
 	int pulled = 0;
 
-	if (env->load_move <= 0)
+	if (env->imbalance <= 0)
 		return 0;
 
 	while (!list_empty(tasks)) {
@@ -3257,7 +3257,7 @@ static int move_tasks(struct lb_env *env)
 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
-		if ((load / 2) > env->load_move)
+		if ((load / 2) > env->imbalance)
 			goto next;
 
 		if (!can_migrate_task(p, env))
@@ -3265,7 +3265,7 @@ static int move_tasks(struct lb_env *env)
 
 		move_task(p, env);
 		pulled++;
-		env->load_move -= load;
+		env->imbalance -= load;
 
 #ifdef CONFIG_PREEMPT
 		/*
@@ -3281,7 +3281,7 @@ static int move_tasks(struct lb_env *env)
 		 * We only want to steal up to the prescribed amount of
 		 * weighted load.
 		 */
-		if (env->load_move <= 0)
+		if (env->imbalance <= 0)
 			break;
 
 		continue;
@@ -3578,10 +3578,9 @@ static inline void update_sd_power_savings_stats(struct sched_group *group,
 
 /**
  * check_power_save_busiest_group - see if there is potential for some power-savings balance
+ * @env: load balance environment
  * @sds: Variable containing the statistics of the sched_domain
  *	under consideration.
- * @this_cpu: Cpu at which we're currently performing load-balancing.
- * @imbalance: Variable to store the imbalance.
  *
  * Description:
  * Check if we have potential to perform some power-savings balance.
@@ -3591,8 +3590,8 @@ static inline void update_sd_power_savings_stats(struct sched_group *group,
  * Returns 1 if there is potential to perform power-savings balance.
  * Else returns 0.
  */
-static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
-					int this_cpu, unsigned long *imbalance)
+static inline
+int check_power_save_busiest_group(struct lb_env *env, struct sd_lb_stats *sds)
 {
 	if (!sds->power_savings_balance)
 		return 0;
@@ -3601,7 +3600,7 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 	    sds->group_leader == sds->group_min)
 		return 0;
 
-	*imbalance = sds->min_load_per_task;
+	env->imbalance = sds->min_load_per_task;
 	sds->busiest = sds->group_min;
 
 	return 1;
@@ -3620,8 +3619,8 @@ static inline void update_sd_power_savings_stats(struct sched_group *group,
 	return;
 }
 
-static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
-					int this_cpu, unsigned long *imbalance)
+static inline
+int check_power_save_busiest_group(struct lb_env *env, struct sd_lb_stats *sds)
 {
 	return 0;
 }
@@ -3765,25 +3764,22 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
- * @this_cpu: Cpu for which load balance is currently performed.
- * @idle: Idle status of this_cpu
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
  * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
-static inline void update_sg_lb_stats(struct sched_domain *sd,
-			struct sched_group *group, int this_cpu,
-			enum cpu_idle_type idle, int load_idx,
+static inline void update_sg_lb_stats(struct lb_env *env,
+			struct sched_group *group, int load_idx,
 			int local_group, const struct cpumask *cpus,
 			int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
-	int i;
 	unsigned int balance_cpu = -1;
 	unsigned long balance_load = ~0UL;
 	unsigned long avg_load_per_task = 0;
+	int i;
 
 	if (local_group)
 		balance_cpu = group_first_cpu(group);
@@ -3827,15 +3823,15 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 * to do the newly idle load balance.
 	 */
 	if (local_group) {
-		if (idle != CPU_NEWLY_IDLE) {
-			if (balance_cpu != this_cpu ||
+		if (env->idle != CPU_NEWLY_IDLE) {
+			if (balance_cpu != env->dst_cpu ||
 			    cmpxchg(&group->balance_cpu, -1, balance_cpu) != -1) {
 				*balance = 0;
 				return;
 			}
-			update_group_power(sd, this_cpu);
+			update_group_power(env->sd, env->dst_cpu);
 		} else if (time_after_eq(jiffies, group->sgp->next_update))
-			update_group_power(sd, this_cpu);
+			update_group_power(env->sd, env->dst_cpu);
 	}
 
 	/* Adjust by relative CPU power of the group */
@@ -3859,7 +3855,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
 						SCHED_POWER_SCALE);
 	if (!sgs->group_capacity)
-		sgs->group_capacity = fix_small_capacity(sd, group);
+		sgs->group_capacity = fix_small_capacity(env->sd, group);
 	sgs->group_weight = group->group_weight;
 
 	if (sgs->group_capacity > sgs->sum_nr_running)
@@ -3877,11 +3873,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
  */
-static bool update_sd_pick_busiest(struct sched_domain *sd,
+static bool update_sd_pick_busiest(struct lb_env *env,
 				   struct sd_lb_stats *sds,
 				   struct sched_group *sg,
-				   struct sg_lb_stats *sgs,
-				   int this_cpu)
+				   struct sg_lb_stats *sgs)
 {
 	if (sgs->avg_load <= sds->max_load)
 		return false;
@@ -3897,8 +3892,8 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
 	 * numbered CPUs in the group, therefore mark all groups
 	 * higher than ourself as busy.
 	 */
-	if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
-	    this_cpu < group_first_cpu(sg)) {
+	if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
+	    env->dst_cpu < group_first_cpu(sg)) {
 		if (!sds->busiest)
 			return true;
 
@@ -3918,28 +3913,28 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
-static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
-				      enum cpu_idle_type idle, const struct cpumask *cpus,
-				      int *balance, struct sd_lb_stats *sds)
+static inline void update_sd_lb_stats(struct lb_env *env,
+				      const struct cpumask *cpus,
+				      int *balance, struct sd_lb_stats *sds)
 {
-	struct sched_domain *child = sd->child;
-	struct sched_group *sg = sd->groups;
+	struct sched_domain *child = env->sd->child;
+	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats sgs;
 	int load_idx, prefer_sibling = 0;
 
 	if (child && child->flags & SD_PREFER_SIBLING)
 		prefer_sibling = 1;
 
-	init_sd_power_savings_stats(sd, sds, idle);
-	load_idx = get_sd_load_idx(sd, idle);
+	init_sd_power_savings_stats(env->sd, sds, env->idle);
+	load_idx = get_sd_load_idx(env->sd, env->idle);
 
 	do {
 		int local_group;
 
-		local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
+		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
-				   local_group, cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group,
+				   cpus, balance, &sgs);
 
 		if (local_group && !(*balance))
 			return;
@@ -3967,7 +3962,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		sds->this_load_per_task = sgs.sum_weighted_load;
 		sds->this_has_capacity = sgs.group_has_capacity;
 		sds->this_idle_cpus = sgs.idle_cpus;
-	} else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
+	} else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
 		sds->max_load = sgs.avg_load;
 		sds->busiest = sg;
 		sds->busiest_nr_running = sgs.sum_nr_running;
@@ -3981,7 +3976,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		update_sd_power_savings_stats(sg, sds, local_group, &sgs);
 
 		sg = sg->next;
-	} while (sg != sd->groups);
+	} while (sg != env->sd->groups);
 }
 
 /**
@@ -4009,24 +4004,23 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
  * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
  * @imbalance: returns amount of imbalanced due to packing.
  */
-static int check_asym_packing(struct sched_domain *sd,
-			      struct sd_lb_stats *sds,
-			      int this_cpu, unsigned long *imbalance)
+static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 {
 	int busiest_cpu;
 
-	if (!(sd->flags & SD_ASYM_PACKING))
+	if (!(env->sd->flags & SD_ASYM_PACKING))
 		return 0;
 
 	if (!sds->busiest)
 		return 0;
 
 	busiest_cpu = group_first_cpu(sds->busiest);
-	if (this_cpu > busiest_cpu)
+	if (env->dst_cpu > busiest_cpu)
 		return 0;
 
-	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
-				       SCHED_POWER_SCALE);
+	env->imbalance = DIV_ROUND_CLOSEST(
+		sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
+
 	return 1;
 }
@@ -4038,8 +4032,8 @@ static int check_asym_packing(struct sched_domain *sd,
  * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
  * @imbalance: Variable to store the imbalance.
  */
-static inline void fix_small_imbalance(struct sd_lb_stats *sds,
-				       int this_cpu, unsigned long *imbalance)
+static inline
+void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 {
 	unsigned long tmp, pwr_now = 0, pwr_move = 0;
 	unsigned int imbn = 2;
@@ -4050,9 +4044,10 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 		if (sds->busiest_load_per_task >
 				sds->this_load_per_task)
 			imbn = 1;
-	} else
+	} else {
 		sds->this_load_per_task =
-			cpu_avg_load_per_task(this_cpu);
+			cpu_avg_load_per_task(env->dst_cpu);
+	}
 
 	scaled_busy_load_per_task = sds->busiest_load_per_task
 					 * SCHED_POWER_SCALE;
@@ -4060,7 +4055,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 
 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
 			(scaled_busy_load_per_task * imbn)) {
-		*imbalance = sds->busiest_load_per_task;
+		env->imbalance = sds->busiest_load_per_task;
 		return;
 	}
@@ -4097,18 +4092,16 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 
 	/* Move if we gain throughput */
 	if (pwr_move > pwr_now)
-		*imbalance = sds->busiest_load_per_task;
+		env->imbalance = sds->busiest_load_per_task;
 }
 
 /**
  * calculate_imbalance - Calculate the amount of imbalance present within the
  *			 groups of a given sched_domain during load balance.
+ * @env: load balance environment
  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
- * @this_cpu: Cpu for which currently load balance is being performed.
- * @imbalance: The variable to store the imbalance.
  */
-static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
-				       unsigned long *imbalance)
+static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 {
 	unsigned long max_pull, load_above_capacity = ~0UL;
@@ -4124,8 +4117,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 	 * its cpu_power, while calculating max_load..)
 	 */
 	if (sds->max_load < sds->avg_load) {
-		*imbalance = 0;
-		return fix_small_imbalance(sds, this_cpu, imbalance);
+		env->imbalance = 0;
+		return fix_small_imbalance(env, sds);
 	}
 
 	if (!sds->group_imb) {
@@ -4153,7 +4146,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
 	/* How much load to actually move to equalise the imbalance */
-	*imbalance = min(max_pull * sds->busiest->sgp->power,
+	env->imbalance = min(max_pull * sds->busiest->sgp->power,
 		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
 			/ SCHED_POWER_SCALE;
@@ -4163,8 +4156,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (*imbalance < sds->busiest_load_per_task)
-		return fix_small_imbalance(sds, this_cpu, imbalance);
+	if (env->imbalance < sds->busiest_load_per_task)
+		return fix_small_imbalance(env, sds);
 
 }
@@ -4195,9 +4188,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
  * put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct sched_domain *sd, int this_cpu,
-		   unsigned long *imbalance, enum cpu_idle_type idle,
-		   const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
 {
 	struct sd_lb_stats sds;
@@ -4207,7 +4198,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * Compute the various statistics relavent for load balancing at
 	 * this level.
 	 */
-	update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
+	update_sd_lb_stats(env, cpus, balance, &sds);
 
 	/*
 	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4216,8 +4207,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!(*balance))
 		goto ret;
 
-	if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
-	    check_asym_packing(sd, &sds, this_cpu, imbalance))
+	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
+	    check_asym_packing(env, &sds))
 		return sds.busiest;
 
 	/* There is no busy sibling group to pull tasks from */
@@ -4235,7 +4226,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		goto force_balance;
 
 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-	if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
+	if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
 	    !sds.busiest_has_capacity)
 		goto force_balance;
@@ -4253,7 +4244,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
-	if (idle == CPU_IDLE) {
+	if (env->idle == CPU_IDLE) {
 		/*
 		 * This cpu is idle. If the busiest group load doesn't
 		 * have more tasks than the number of available cpu's and
@@ -4268,13 +4259,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
 		 * imbalance_pct to be conservative.
 		 */
-		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+		if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
 			goto out_balanced;
 	}
 
 force_balance:
 	/* Looks like there is an imbalance. Compute it */
-	calculate_imbalance(&sds, this_cpu, imbalance);
+	calculate_imbalance(env, &sds);
 	return sds.busiest;
 
 out_balanced:
@@ -4282,20 +4273,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * There is no obvious imbalance. But check if we can do some balancing
 	 * to save power.
 	 */
-	if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
+	if (check_power_save_busiest_group(env, &sds))
 		return sds.busiest;
 ret:
-	*imbalance = 0;
+	env->imbalance = 0;
 	return NULL;
 }
 
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
-static struct rq *
-find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
-		   enum cpu_idle_type idle, unsigned long imbalance,
-		   const struct cpumask *cpus)
+static struct rq *find_busiest_queue(struct lb_env *env,
+				     struct sched_group *group,
+				     const struct cpumask *cpus)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -4308,7 +4298,7 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
 		unsigned long wl;
 
 		if (!capacity)
-			capacity = fix_small_capacity(sd, group);
+			capacity = fix_small_capacity(env->sd, group);
 
 		if (!cpumask_test_cpu(i, cpus))
 			continue;
@@ -4320,7 +4310,7 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
 		 * When comparing with imbalance, use weighted_cpuload()
 		 * which is not scaled with the cpu power.
 		 */
-		if (capacity && rq->nr_running == 1 && wl > imbalance)
+		if (capacity && rq->nr_running == 1 && wl > env->imbalance)
 			continue;
 
 		/*
@@ -4349,17 +4339,18 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
 /* Working cpumask for load_balance and load_balance_newidle. */
 DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
 
-static int need_active_balance(struct sched_domain *sd, int idle,
-			       int busiest_cpu, int this_cpu)
+static int need_active_balance(struct lb_env *env)
 {
-	if (idle == CPU_NEWLY_IDLE) {
+	struct sched_domain *sd = env->sd;
+
+	if (env->idle == CPU_NEWLY_IDLE) {
 
 		/*
 		 * ASYM_PACKING needs to force migrate tasks from busy but
 		 * higher numbered CPUs in order to pack all tasks in the
 		 * lowest numbered CPUs.
 		 */
-		if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
+		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
 			return 1;
 
 		/*
@@ -4400,7 +4391,6 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 {
 	int ld_moved, active_balance = 0;
 	struct sched_group *group;
-	unsigned long imbalance;
 	struct rq *busiest;
 	unsigned long flags;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
@@ -4418,8 +4408,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	schedstat_inc(sd, lb_count[idle]);
 
 redo:
-	group = find_busiest_group(sd, this_cpu, &imbalance, idle,
-				   cpus, balance);
+	group = find_busiest_group(&env, cpus, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -4429,7 +4418,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
+	busiest = find_busiest_queue(&env, group, cpus);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
@@ -4437,7 +4426,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 	BUG_ON(busiest == this_rq);
 
-	schedstat_add(sd, lb_imbalance[idle], imbalance);
+	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
 
 	ld_moved = 0;
 	if (busiest->nr_running > 1) {
@@ -4448,7 +4437,6 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
-		env.load_move = imbalance;
 		env.src_cpu = busiest->cpu;
 		env.src_rq = busiest;
 		env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
@@ -4493,7 +4481,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		if (idle != CPU_NEWLY_IDLE)
 			sd->nr_balance_failed++;
 
-		if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
+		if (need_active_balance(&env)) {
 			raw_spin_lock_irqsave(&busiest->lock, flags);
 
 			/* don't kick the active_load_balance_cpu_stop,
@@ -4520,10 +4508,11 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		}
 		raw_spin_unlock_irqrestore(&busiest->lock, flags);
 
-		if (active_balance)
+		if (active_balance) {
 			stop_one_cpu_nowait(cpu_of(busiest),
 				active_load_balance_cpu_stop, busiest,
 				&busiest->active_balance_work);
+		}
 
 		/*
 		 * We've kicked active balancing, reset the failure
...