提交 be4d638c 编写于 作者: Rusty Russell

cpumask: Replace cpu_coregroup_map with cpu_coregroup_mask

cpu_coregroup_map returned a cpumask_t: it's going away.

(Note, the sched part of this patch won't apply meaningfully to the
sched tree, but I'm posting it to show the goal).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Ingo Molnar <mingo@redhat.com>
上级 9be3eec2
...@@ -99,8 +99,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) ...@@ -99,8 +99,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
static inline int blk_cpu_to_group(int cpu) static inline int blk_cpu_to_group(int cpu)
{ {
#ifdef CONFIG_SCHED_MC #ifdef CONFIG_SCHED_MC
cpumask_t mask = cpu_coregroup_map(cpu); const struct cpumask *mask = cpu_coregroup_mask(cpu);
return first_cpu(mask); return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT) #elif defined(CONFIG_SCHED_SMT)
return first_cpu(per_cpu(cpu_sibling_map, cpu)); return first_cpu(per_cpu(cpu_sibling_map, cpu));
#else #else
......
...@@ -7119,7 +7119,7 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, ...@@ -7119,7 +7119,7 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
{ {
int group; int group;
#ifdef CONFIG_SCHED_MC #ifdef CONFIG_SCHED_MC
*mask = cpu_coregroup_map(cpu); *mask = *cpu_coregroup_mask(cpu);
cpus_and(*mask, *mask, *cpu_map); cpus_and(*mask, *mask, *cpu_map);
group = first_cpu(*mask); group = first_cpu(*mask);
#elif defined(CONFIG_SCHED_SMT) #elif defined(CONFIG_SCHED_SMT)
...@@ -7485,7 +7485,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, ...@@ -7485,7 +7485,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
sd = &per_cpu(core_domains, i); sd = &per_cpu(core_domains, i);
SD_INIT(sd, MC); SD_INIT(sd, MC);
set_domain_attribute(sd, attr); set_domain_attribute(sd, attr);
sd->span = cpu_coregroup_map(i); sd->span = *cpu_coregroup_mask(i);
cpus_and(sd->span, sd->span, *cpu_map); cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p; sd->parent = p;
p->child = sd; p->child = sd;
...@@ -7528,7 +7528,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, ...@@ -7528,7 +7528,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
SCHED_CPUMASK_VAR(this_core_map, allmasks); SCHED_CPUMASK_VAR(this_core_map, allmasks);
SCHED_CPUMASK_VAR(send_covered, allmasks); SCHED_CPUMASK_VAR(send_covered, allmasks);
*this_core_map = cpu_coregroup_map(i); *this_core_map = *cpu_coregroup_mask(i);
cpus_and(*this_core_map, *this_core_map, *cpu_map); cpus_and(*this_core_map, *this_core_map, *cpu_map);
if (i != first_cpu(*this_core_map)) if (i != first_cpu(*this_core_map))
continue; continue;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册