diff --git a/block/blk.h b/block/blk.h
index d2e49af90db548ffa68e950bd800a48fdde79a1d..6e1ed40534e97e78ff0263268b2826344f2207b7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -99,8 +99,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 static inline int blk_cpu_to_group(int cpu)
 {
 #ifdef CONFIG_SCHED_MC
-	cpumask_t mask = cpu_coregroup_map(cpu);
-	return first_cpu(mask);
+	const struct cpumask *mask = cpu_coregroup_mask(cpu);
+	return cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
 	return first_cpu(per_cpu(cpu_sibling_map, cpu));
 #else
diff --git a/kernel/sched.c b/kernel/sched.c
index d2d16d1273b1ff9e4473aa7415d278e7b4013631..42929239830fead30c867760c49153b20997b119 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7119,7 +7119,7 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
-	*mask = cpu_coregroup_map(cpu);
+	*mask = *cpu_coregroup_mask(cpu);
 	cpus_and(*mask, *mask, *cpu_map);
 	group = first_cpu(*mask);
 #elif defined(CONFIG_SCHED_SMT)
@@ -7485,7 +7485,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(core_domains, i);
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
-		sd->span = cpu_coregroup_map(i);
+		sd->span = *cpu_coregroup_mask(i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
@@ -7528,7 +7528,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
-		*this_core_map = cpu_coregroup_map(i);
+		*this_core_map = *cpu_coregroup_mask(i);
 		cpus_and(*this_core_map, *this_core_map, *cpu_map);
 		if (i != first_cpu(*this_core_map))
 			continue;
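
Note on the conversion: cpu_coregroup_map() returned a full cpumask_t by value, copying NR_CPUS bits onto the caller's stack at every call site; with large NR_CPUS builds that copy is costly and eats kernel stack. Its replacement, cpu_coregroup_mask(), returns a const pointer into the topology data, so callers that only inspect the mask (as in blk_cpu_to_group() above) avoid the copy entirely. The sched.c hunks still assign into cpumask_t storage, so they dereference the returned pointer; behaviour is unchanged while the by-value interface goes away.

A minimal sketch of the old and new call patterns, assuming the <linux/cpumask.h> and <linux/topology.h> interfaces of this kernel generation (the helper names coregroup_first_old/new are illustrative; this is kernel context, not a standalone program):

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Old pattern: cpu_coregroup_map() returns a cpumask_t by value,
	 * placing an NR_CPUS-bit copy on the caller's stack. */
	static int coregroup_first_old(int cpu)
	{
		cpumask_t mask = cpu_coregroup_map(cpu);

		return first_cpu(mask);
	}

	/* New pattern: cpu_coregroup_mask() returns a const pointer into
	 * topology data, so no stack copy is made; cpumask_first() is the
	 * pointer-based counterpart of first_cpu(). */
	static int coregroup_first_new(int cpu)
	{
		const struct cpumask *mask = cpu_coregroup_mask(cpu);

		return cpumask_first(mask);
	}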