Commit 14e292f8 authored by Peter Zijlstra

sched,rt: Use cpumask_any*_distribute()

Replace a bunch of cpumask_any*() instances with
cpumask_any*_distribute(). By injecting this little bit of randomness
into CPU selection, we reduce the chance that two competing balance
operations working off the same lowest_mask pick the same CPU.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102347.190759694@infradead.org
Parent 3015ef4b
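The core of the change is easiest to see in isolation: replace an
"always the first set bit" pick with a remembered cursor that rotates
through the mask. Below is a small user-space C model of that idea (an
illustrative sketch only; NR_CPUS, the bit helpers, and the demo mask
are invented for the sketch and are not the kernel API — the kernel
keeps the cursor in a per-CPU variable, the model just passes it
explicitly):

/*
 * Toy model of the cpumask_any_distribute() idea: a remembered
 * cursor spreads successive picks over the mask instead of always
 * returning the first set bit.
 */
#include <stdio.h>

#define NR_CPUS 8

/* Toy "cpumask": bit i set => CPU i is a candidate. */
static int next_set_bit(unsigned mask, int after)
{
	for (int cpu = after + 1; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;		/* models next >= nr_cpu_ids */
}

static int first_set_bit(unsigned mask)
{
	return next_set_bit(mask, -1);
}

/* Models the per-CPU distribute_cpu_mask_prev cursor. */
static int pick_distribute(unsigned mask, int *prev)
{
	int next = next_set_bit(mask, *prev);

	if (next >= NR_CPUS)		/* past the end: wrap around */
		next = first_set_bit(mask);
	if (next < NR_CPUS)		/* remember only valid picks */
		*prev = next;
	return next;
}

int main(void)
{
	unsigned lowest_mask = 0x2c;	/* CPUs {2, 3, 5} */
	int prev = 0;

	/* cpumask_first()-style picks always collide on CPU 2 ... */
	for (int i = 0; i < 4; i++)
		printf("first: %d\n", first_set_bit(lowest_mask));

	/* ... while distributed picks rotate: 2, 3, 5, 2, ... */
	for (int i = 0; i < 4; i++)
		printf("distribute: %d\n",
		       pick_distribute(lowest_mask, &prev));
	return 0;
}

With the first-bit pick, every competing picker lands on CPU 2; with
the cursor, successive picks rotate 2, 3, 5, 2, ..., so two balancers
racing on the same mask are far less likely to collide.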
include/linux/cpumask.h
@@ -199,6 +199,11 @@ static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
 	return cpumask_next_and(-1, src1p, src2p);
 }
 
+static inline int cpumask_any_distribute(const struct cpumask *srcp)
+{
+	return cpumask_first(srcp);
+}
+
 #define for_each_cpu(cpu, mask)			\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)		\
@@ -252,6 +257,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 unsigned int cpumask_local_spread(unsigned int i, int node);
 int cpumask_any_and_distribute(const struct cpumask *src1p,
 			       const struct cpumask *src2p);
+int cpumask_any_distribute(const struct cpumask *srcp);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
...
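Of the two hunks above, the first adds the !CONFIG_SMP stub (note the
single-iteration for_each_cpu() right below it): on UP there is at most
one CPU, so cpumask_first() is the only sensible pick and there is
nothing to distribute. The second adds the SMP declaration, whose
implementation lands in lib/cpumask.c further down.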
kernel/sched/deadline.c
@@ -2002,8 +2002,8 @@ static int find_later_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_first_and(later_mask,
-							sched_domain_span(sd));
+			best_cpu = cpumask_any_and_distribute(later_mask,
+							      sched_domain_span(sd));
 			/*
 			 * Last chance: if a CPU being in both later_mask
 			 * and current sd span is valid, that becomes our
@@ -2025,7 +2025,7 @@ static int find_later_rq(struct task_struct *task)
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any(later_mask);
+	cpu = cpumask_any_distribute(later_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
...
kernel/sched/rt.c
@@ -1752,8 +1752,8 @@ static int find_lowest_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_first_and(lowest_mask,
-						     sched_domain_span(sd));
+			best_cpu = cpumask_any_and_distribute(lowest_mask,
+							      sched_domain_span(sd));
 			if (best_cpu < nr_cpu_ids) {
 				rcu_read_unlock();
 				return best_cpu;
@@ -1770,7 +1770,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any(lowest_mask);
+	cpu = cpumask_any_distribute(lowest_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
...
lib/cpumask.c
@@ -267,3 +267,21 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
 	return next;
 }
 EXPORT_SYMBOL(cpumask_any_and_distribute);
+
+int cpumask_any_distribute(const struct cpumask *srcp)
+{
+	int next, prev;
+
+	/* NOTE: our first selection will skip 0. */
+	prev = __this_cpu_read(distribute_cpu_mask_prev);
+
+	next = cpumask_next(prev, srcp);
+	if (next >= nr_cpu_ids)
+		next = cpumask_first(srcp);
+
+	if (next < nr_cpu_ids)
+		__this_cpu_write(distribute_cpu_mask_prev, next);
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_any_distribute);
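A short trace makes the NOTE in the new function concrete: the per-CPU
distribute_cpu_mask_prev cursor starts at 0, and cpumask_next(prev,
srcp) returns the first set bit strictly after prev, so CPU 0 is only
ever returned via the cpumask_first() wrap-around. For srcp = {0, 2,
5}, successive calls from one CPU yield 2, 5, 0, 2, ... Note also that
prev is only written back when a valid CPU was found, so an empty mask
leaves the cursor untouched.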