Commit 52b166af authored by Thomas Gleixner

x86/apic: Move online masking to core code

All implementations of apic->cpu_mask_to_apicid_and() mask out the offline
cpus. The callsite already has a mask available, which has the offline CPUs
removed. Use that and remove the extra bits.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235446.560868224@linutronix.de
Parent bbcf9574
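The pattern removed below is identical in every implementation: walk the intersection of the two cpumasks and skip CPUs that are not set in cpu_online_mask. Since the callsite now passes a mask that already excludes offline CPUs, the first CPU of the intersection is sufficient. A minimal kernel-style sketch of the before/after shape (illustrative only; the helper names pick_target_cpu_old()/pick_target_cpu_new() are made up and not part of this patch):

#include <linux/cpumask.h>	/* for_each_cpu_and(), cpumask_first_and() */

/* Old shape: each apic driver re-checked cpu_online_mask itself. */
static unsigned int pick_target_cpu_old(const struct cpumask *cpumask,
					const struct cpumask *andmask)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;		/* first online CPU in both masks */
	}
	return cpu;			/* >= nr_cpu_ids if none found */
}

/* New shape: the caller guarantees the masks only contain online CPUs. */
static unsigned int pick_target_cpu_new(const struct cpumask *cpumask,
					const struct cpumask *andmask)
{
	return cpumask_first_and(cpumask, andmask);
}

cpumask_first_and() returns a value >= nr_cpu_ids when the intersection is empty, which is why each implementation below checks cpu >= nr_cpu_ids before touching per-CPU data.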
@@ -2205,19 +2205,12 @@ int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 				   const struct cpumask *andmask,
 				   unsigned int *apicid)
 {
-	unsigned int cpu;
+	unsigned int cpu = cpumask_first_and(cpumask, andmask);
 
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-
-	if (likely(cpu < nr_cpu_ids)) {
-		*apicid = per_cpu(x86_cpu_to_apicid, cpu);
-		return 0;
-	}
-
-	return -EINVAL;
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+	*apicid = per_cpu(x86_cpu_to_apicid, cpu);
+	return 0;
 }
 
 int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -2226,14 +2219,12 @@ int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 {
 	unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
 				 cpumask_bits(andmask)[0] &
-				 cpumask_bits(cpu_online_mask)[0] &
 				 APIC_ALL_CPUS;
 
-	if (likely(cpu_mask)) {
-		*apicid = (unsigned int)cpu_mask;
-		return 0;
-	}
-	return -EINVAL;
+	if (!cpu_mask)
+		return -EINVAL;
+	*apicid = (unsigned int)cpu_mask;
+	return 0;
 }
 
 /*
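In flat logical mode the destination is a bitmask with one bit per CPU, capped to the low byte by APIC_ALL_CPUS, so the result is just the first word of the two cpumasks ANDed together; only the explicit cpu_online_mask term goes away. A stand-alone model of that computation with assumed mask values (not taken from the kernel):

#include <stdio.h>

#define APIC_ALL_CPUS 0xFFu	/* flat logical mode: one destination bit per CPU, max 8 CPUs */

int main(void)
{
	/* Assumed example masks: bit n set means CPU n is a candidate. */
	unsigned int cpumask = 0x3F;	/* CPUs 0-5 requested by the caller */
	unsigned int andmask = 0x0C;	/* CPUs 2-3 in the vector domain */
	unsigned int dest = cpumask & andmask & APIC_ALL_CPUS;

	if (!dest)
		printf("no target CPU (-EINVAL)\n");
	else
		printf("logical destination bitmask: 0x%02x\n", dest);	/* prints 0x0c */
	return 0;
}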
@@ -221,8 +221,11 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
 	 * as we already established, that mask & d->domain & cpu_online_mask
 	 * is not empty.
+	 *
+	 * vector_searchmask is a subset of d->domain and has the offline
+	 * cpus masked out.
 	 */
-	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+	BUG_ON(apic->cpu_mask_to_apicid_and(mask, vector_searchmask,
 					    &d->cfg.dest_apicid));
 	return 0;
 }
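The added comment documents the contract the core code now provides: the mask handed to cpu_mask_to_apicid_and() is already restricted to online CPUs. A hedged sketch of how a caller could build such a mask (the helper name build_searchmask() is hypothetical; the actual setup of vector_searchmask happens elsewhere in vector.c):

#include <linux/cpumask.h>

/*
 * Illustrative only: derive a search mask that is a subset of @domain
 * with offline CPUs removed, so the apic callbacks no longer have to
 * consult cpu_online_mask themselves.
 */
static void build_searchmask(struct cpumask *searchmask,
			     const struct cpumask *domain)
{
	cpumask_and(searchmask, domain, cpu_online_mask);
}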
@@ -108,31 +108,24 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 			      const struct cpumask *andmask,
 			      unsigned int *apicid)
 {
+	unsigned int cpu;
 	u32 dest = 0;
 	u16 cluster;
-	int i;
-
-	for_each_cpu_and(i, cpumask, andmask) {
-		if (!cpumask_test_cpu(i, cpu_online_mask))
-			continue;
-		dest = per_cpu(x86_cpu_to_logical_apicid, i);
-		cluster = x2apic_cluster(i);
-		break;
-	}
 
-	if (!dest)
+	cpu = cpumask_first_and(cpumask, andmask);
+	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	for_each_cpu_and(i, cpumask, andmask) {
-		if (!cpumask_test_cpu(i, cpu_online_mask))
-			continue;
-		if (cluster != x2apic_cluster(i))
+	dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+	cluster = x2apic_cluster(cpu);
+
+	for_each_cpu_and(cpu, cpumask, andmask) {
+		if (cluster != x2apic_cluster(cpu))
 			continue;
-		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
+		dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
 	}
 
 	*apicid = dest;
 
 	return 0;
 }
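In x2APIC cluster mode the logical APIC ID carries the cluster number in its upper 16 bits and a single per-CPU bit in the lower 16 bits; x2apic_cluster() is essentially that shift. That is why the rewritten function only needs the first CPU of the intersection to pick the cluster, and can then OR in every other candidate from the same cluster. A stand-alone model of the accumulation with assumed logical IDs (not taken from the kernel):

#include <stdio.h>

/* Cluster = upper 16 bits of the logical ID (models x2apic_cluster()). */
static unsigned int cluster_of(unsigned int logical_id)
{
	return logical_id >> 16;
}

int main(void)
{
	/* Assumed logical IDs for CPUs 0-3: two CPUs in each of two clusters. */
	unsigned int logical_id[] = { 0x00010001, 0x00010002, 0x00020001, 0x00020002 };
	unsigned int dest, cluster;
	int cpu;

	/* The first candidate CPU fixes the target cluster. */
	dest = logical_id[0];
	cluster = cluster_of(dest);

	/* OR in every other candidate that lives in the same cluster. */
	for (cpu = 1; cpu < 4; cpu++) {
		if (cluster_of(logical_id[cpu]) != cluster)
			continue;
		dest |= logical_id[cpu];
	}

	printf("destination: 0x%08x\n", dest);	/* 0x00010003: cluster 1, CPUs 0 and 1 */
	return 0;
}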