Commit f7fa7aee authored by Jiang Liu, committed by Thomas Gleixner

x86/irq: Avoid memory allocation in __assign_irq_vector()

Function __assign_irq_vector() is protected by vector_lock, so it can use
a global temporary cpumask instead of allocating and freeing one on every call.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Cohen <david.a.cohen@linux.intel.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Link: http://lkml.kernel.org/r/1428978610-28986-34-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent d746d1eb
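The change is an instance of a common kernel pattern: when every caller of a function already holds the same lock, a per-call temporary buffer can be replaced by one pre-allocated, lock-protected scratch buffer, eliminating a GFP_ATOMIC allocation that can fail at an awkward time. Below is a minimal sketch of that pattern with hypothetical names (example_lock, example_scratch_mask, example_assign); it illustrates the idea and is not the actual vector.c code.

/*
 * Sketch of the lock-protected scratch-buffer pattern.
 * Hypothetical names, for illustration only.
 */
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static cpumask_var_t example_scratch_mask;	/* shared scratch buffer */

/* One-time allocation at init time, where GFP_KERNEL may sleep. */
static int __init example_early_init(void)
{
	BUG_ON(!alloc_cpumask_var(&example_scratch_mask, GFP_KERNEL));
	return 0;
}
early_initcall(example_early_init);

/*
 * Hot path: must be called with example_lock held.  The lock
 * serializes every user of example_scratch_mask, so no per-call
 * alloc_cpumask_var(GFP_ATOMIC)/free_cpumask_var() pair is needed,
 * and the function can no longer fail with -ENOMEM.
 */
static int example_assign(const struct cpumask *requested)
{
	lockdep_assert_held(&example_lock);

	cpumask_and(example_scratch_mask, requested, cpu_online_mask);
	if (cpumask_empty(example_scratch_mask))
		return -ENOSPC;
	/* ... use example_scratch_mask as working storage ... */
	return 0;
}

The commit below applies exactly this shape to __assign_irq_vector(): the scratch mask becomes the file-scope vector_cpumask, and the one-time allocation moves into arch_early_irq_init().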
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -30,6 +30,7 @@ struct apic_chip_data {
 
 struct irq_domain *x86_vector_domain;
 static DEFINE_RAW_SPINLOCK(vector_lock);
+static cpumask_var_t vector_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
 static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@@ -116,14 +117,10 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
         static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
         static int current_offset = VECTOR_OFFSET_START % 16;
         int cpu, err;
-        cpumask_var_t tmp_mask;
 
         if (d->move_in_progress)
                 return -EBUSY;
 
-        if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-                return -ENOMEM;
-
         /* Only try and allocate irqs on cpus that are present */
         err = -ENOSPC;
         cpumask_clear(d->old_domain);
@@ -131,21 +128,22 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
         while (cpu < nr_cpu_ids) {
                 int new_cpu, vector, offset;
 
-                apic->vector_allocation_domain(cpu, tmp_mask, mask);
+                apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
-                if (cpumask_subset(tmp_mask, d->domain)) {
+                if (cpumask_subset(vector_cpumask, d->domain)) {
                         err = 0;
-                        if (cpumask_equal(tmp_mask, d->domain))
+                        if (cpumask_equal(vector_cpumask, d->domain))
                                 break;
                         /*
                          * New cpumask using the vector is a proper subset of
                          * the current in use mask. So cleanup the vector
                          * allocation for the members that are not used anymore.
                          */
-                        cpumask_andnot(d->old_domain, d->domain, tmp_mask);
+                        cpumask_andnot(d->old_domain, d->domain,
+                                       vector_cpumask);
                         d->move_in_progress =
                            cpumask_intersects(d->old_domain, cpu_online_mask);
-                        cpumask_and(d->domain, d->domain, tmp_mask);
+                        cpumask_and(d->domain, d->domain, vector_cpumask);
                         break;
                 }
@@ -159,16 +157,18 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
                 }
 
                 if (unlikely(current_vector == vector)) {
-                        cpumask_or(d->old_domain, d->old_domain, tmp_mask);
-                        cpumask_andnot(tmp_mask, mask, d->old_domain);
-                        cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+                        cpumask_or(d->old_domain, d->old_domain,
+                                   vector_cpumask);
+                        cpumask_andnot(vector_cpumask, mask, d->old_domain);
+                        cpu = cpumask_first_and(vector_cpumask,
+                                                cpu_online_mask);
                         continue;
                 }
 
                 if (test_bit(vector, used_vectors))
                         goto next;
 
-                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+                for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
                         if (per_cpu(vector_irq, new_cpu)[vector] >
                             VECTOR_UNDEFINED)
                                 goto next;
@@ -181,14 +181,13 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
                         d->move_in_progress =
                            cpumask_intersects(d->old_domain, cpu_online_mask);
                 }
-                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+                for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
                         per_cpu(vector_irq, new_cpu)[vector] = irq;
                 d->cfg.vector = vector;
-                cpumask_copy(d->domain, tmp_mask);
+                cpumask_copy(d->domain, vector_cpumask);
                 err = 0;
                 break;
         }
-        free_cpumask_var(tmp_mask);
 
         if (!err) {
                 /* cache destination APIC IDs into cfg->dest_apicid */
@@ -397,6 +396,8 @@ int __init arch_early_irq_init(void)
         arch_init_msi_domain(x86_vector_domain);
         arch_init_htirq_domain(x86_vector_domain);
 
+        BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+
         return arch_early_ioapic_init();
 }
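Two details make the global scratch mask safe and cheap here. First, __assign_irq_vector() runs entirely under vector_lock, so no two users can touch vector_cpumask concurrently. Second, the mask is allocated once in arch_early_irq_init() with GFP_KERNEL, where sleeping is allowed; with CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is a fixed-size array and alloc_cpumask_var() always succeeds, so the BUG_ON() only matters for large-NR_CPUS configurations that allocate the mask from the heap.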