提交 0b8255e6 编写于 作者: S Suresh Siddha 提交者: Ingo Molnar

x86/x2apic/cluster: Use all the members of one cluster specified in the smp_affinity mask for the interrupt destination

x86/x2apic/cluster: Use all the members of one cluster specified in the smp_affinity mask for the interrupt destination

If the HW implements round-robin interrupt delivery, this
enables multiple cpu's (which are part of the user specified
interrupt smp_affinity mask and belong to the same x2apic
cluster) to service the interrupt.

Also if the platform supports Power Aware Interrupt Routing,
then this enables the interrupt to be routed to an idle cpu or a
busy cpu depending on the perf/power bias tunable.

We are now grouping all the cpu's in a cluster to one vector
domain. So that will limit the total number of interrupt sources
handled by Linux. Previously we supported "cpu-count *
available-vectors-per-cpu" interrupt sources, but this will now
be reduced to "cpu-count/16 * available-vectors-per-cpu".
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: yinghai@kernel.org
Cc: gorcunov@openvz.org
Cc: agordeev@redhat.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1337644682-19854-2-git-send-email-suresh.b.siddha@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 332afa65
...@@ -28,15 +28,6 @@ static int x2apic_apic_id_registered(void) ...@@ -28,15 +28,6 @@ static int x2apic_apic_id_registered(void)
return 1; return 1;
} }
/*
* For now each logical cpu is in its own vector allocation domain.
*/
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
cpumask_clear(retmask);
cpumask_set_cpu(cpu, retmask);
}
static void static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{ {
......
...@@ -98,34 +98,47 @@ static void x2apic_send_IPI_all(int vector) ...@@ -98,34 +98,47 @@ static void x2apic_send_IPI_all(int vector)
static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{ {
/*
* We're using fixed IRQ delivery, can only return one logical APIC ID.
* May as well be the first.
*/
int cpu = cpumask_first(cpumask); int cpu = cpumask_first(cpumask);
u32 dest = 0;
int i;
if ((unsigned)cpu < nr_cpu_ids) if (cpu > nr_cpu_ids)
return per_cpu(x86_cpu_to_logical_apicid, cpu);
else
return BAD_APICID; return BAD_APICID;
for_each_cpu_and(i, cpumask, per_cpu(cpus_in_cluster, cpu))
dest |= per_cpu(x86_cpu_to_logical_apicid, i);
return dest;
} }
static unsigned int static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask) const struct cpumask *andmask)
{ {
int cpu; u32 dest = 0;
u16 cluster;
int i;
/* for_each_cpu_and(i, cpumask, andmask) {
* We're using fixed IRQ delivery, can only return one logical APIC ID. if (!cpumask_test_cpu(i, cpu_online_mask))
* May as well be the first. continue;
*/ dest = per_cpu(x86_cpu_to_logical_apicid, i);
for_each_cpu_and(cpu, cpumask, andmask) { cluster = x2apic_cluster(i);
if (cpumask_test_cpu(cpu, cpu_online_mask)) break;
break;
} }
return per_cpu(x86_cpu_to_logical_apicid, cpu); if (!dest)
return BAD_APICID;
for_each_cpu_and(i, cpumask, andmask) {
if (!cpumask_test_cpu(i, cpu_online_mask))
continue;
if (cluster != x2apic_cluster(i))
continue;
dest |= per_cpu(x86_cpu_to_logical_apicid, i);
}
return dest;
} }
static void init_x2apic_ldr(void) static void init_x2apic_ldr(void)
...@@ -208,6 +221,15 @@ static int x2apic_cluster_probe(void) ...@@ -208,6 +221,15 @@ static int x2apic_cluster_probe(void)
return 0; return 0;
} }
/*
 * Each x2apic cluster is a vector allocation domain: all CPUs in the
 * cluster of @cpu share one vector for a given interrupt.
 */
static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/*
	 * cpumask_copy() overwrites the whole destination mask, so the
	 * cpumask_clear(retmask) the original did first was redundant.
	 */
	cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu));
}
static struct apic apic_x2apic_cluster = { static struct apic apic_x2apic_cluster = {
.name = "cluster x2apic", .name = "cluster x2apic",
...@@ -225,7 +247,7 @@ static struct apic apic_x2apic_cluster = { ...@@ -225,7 +247,7 @@ static struct apic apic_x2apic_cluster = {
.check_apicid_used = NULL, .check_apicid_used = NULL,
.check_apicid_present = NULL, .check_apicid_present = NULL,
.vector_allocation_domain = x2apic_vector_allocation_domain, .vector_allocation_domain = cluster_vector_allocation_domain,
.init_apic_ldr = init_x2apic_ldr, .init_apic_ldr = init_x2apic_ldr,
.ioapic_phys_id_map = NULL, .ioapic_phys_id_map = NULL,
......
...@@ -120,6 +120,15 @@ static int x2apic_phys_probe(void) ...@@ -120,6 +120,15 @@ static int x2apic_phys_probe(void)
return apic == &apic_x2apic_phys; return apic == &apic_x2apic_phys;
} }
/*
 * Physical-mode x2apic: each logical cpu forms its own vector
 * allocation domain, so the domain mask contains exactly @cpu.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/* Empty the mask first; cpumask_set_cpu() only adds one bit. */
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}
static struct apic apic_x2apic_phys = { static struct apic apic_x2apic_phys = {
.name = "physical x2apic", .name = "physical x2apic",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册