Commit cbbfb0ae, authored by Linus Torvalds

Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 apic updates from Ingo Molnar:
 "Improve the spreading of managed IRQs at allocation time"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irq/matrix: Spread managed interrupts on allocation
  irq/matrix: Split out the CPU selection code into a helper
...@@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest) ...@@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
struct apic_chip_data *apicd = apic_chip_data(irqd); struct apic_chip_data *apicd = apic_chip_data(irqd);
int vector, cpu; int vector, cpu;
cpumask_and(vector_searchmask, vector_searchmask, affmsk); cpumask_and(vector_searchmask, dest, affmsk);
cpu = cpumask_first(vector_searchmask);
if (cpu >= nr_cpu_ids)
return -EINVAL;
/* set_affinity might call here for nothing */ /* set_affinity might call here for nothing */
if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask)) if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
return 0; return 0;
vector = irq_matrix_alloc_managed(vector_matrix, cpu); vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
&cpu);
trace_vector_alloc_managed(irqd->irq, vector, vector); trace_vector_alloc_managed(irqd->irq, vector, vector);
if (vector < 0) if (vector < 0)
return vector; return vector;
......
...@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m); ...@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu); int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
unsigned int *mapped_cpu);
void irq_matrix_reserve(struct irq_matrix *m); void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m); void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
......
...@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm, ...@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
return area; return area;
} }
/*
 * Find the best CPU in @msk for a new allocation: the online CPU with
 * the largest number of still-available vectors.  Returns UINT_MAX when
 * no CPU in the mask is online or has room left.
 */
static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
					 const struct cpumask *msk)
{
	unsigned int cpu, winner = UINT_MAX, best_avail = 0;
	struct cpumap *map;

	for_each_cpu(cpu, msk) {
		map = per_cpu_ptr(m->maps, cpu);
		/* Offline CPUs cannot take an allocation */
		if (!map->online)
			continue;
		/* Strictly-greater keeps the first CPU on ties */
		if (map->available <= best_avail)
			continue;
		winner = cpu;
		best_avail = map->available;
	}
	return winner;
}
/** /**
* irq_matrix_assign_system - Assign system wide entry in the matrix * irq_matrix_assign_system - Assign system wide entry in the matrix
* @m: Matrix pointer * @m: Matrix pointer
...@@ -239,11 +260,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk) ...@@ -239,11 +260,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
* @m: Matrix pointer * @m: Matrix pointer
* @cpu: On which CPU the interrupt should be allocated * @cpu: On which CPU the interrupt should be allocated
*/ */
int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu) int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
unsigned int *mapped_cpu)
{ {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu); unsigned int bit, cpu, end = m->alloc_end;
unsigned int bit, end = m->alloc_end; struct cpumap *cm;
if (cpumask_empty(msk))
return -EINVAL;
cpu = matrix_find_best_cpu(m, msk);
if (cpu == UINT_MAX)
return -ENOSPC;
cm = per_cpu_ptr(m->maps, cpu);
end = m->alloc_end;
/* Get managed bit which are not allocated */ /* Get managed bit which are not allocated */
bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end); bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
bit = find_first_bit(m->scratch_map, end); bit = find_first_bit(m->scratch_map, end);
...@@ -252,6 +283,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu) ...@@ -252,6 +283,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
set_bit(bit, cm->alloc_map); set_bit(bit, cm->alloc_map);
cm->allocated++; cm->allocated++;
m->total_allocated++; m->total_allocated++;
*mapped_cpu = cpu;
trace_irq_matrix_alloc_managed(bit, cpu, m, cm); trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
return bit; return bit;
} }
...@@ -322,37 +354,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m) ...@@ -322,37 +354,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
/*
 * Allocate a regular (non-managed) entry in the matrix.
 * @m:		Matrix pointer
 * @msk:	Which CPUs to search in
 * @reserved:	Allocation was reserved earlier via irq_matrix_reserve()
 * @mapped_cpu:	Pointer to store the CPU for which the irq was allocated
 *
 * Returns the allocated bit number on success, -ENOSPC when no CPU in
 * @msk is online or has a free vector.
 */
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
		     bool reserved, unsigned int *mapped_cpu)
{
	unsigned int cpu, bit;
	struct cpumap *cm;

	/* Spread load: pick the online CPU with the most free vectors */
	cpu = matrix_find_best_cpu(m, msk);
	if (cpu == UINT_MAX)
		return -ENOSPC;

	cm = per_cpu_ptr(m->maps, cpu);
	bit = matrix_alloc_area(m, cm, 1, false);
	if (bit >= m->alloc_end)
		return -ENOSPC;

	/* Account the allocation per CPU and matrix wide */
	cm->allocated++;
	cm->available--;
	m->total_allocated++;
	m->global_available--;
	/* A reserved slot is being consumed now; drop the reservation */
	if (reserved)
		m->global_reserved--;

	*mapped_cpu = cpu;
	trace_irq_matrix_alloc(bit, cpu, m, cm);
	return bit;
}
/** /**
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册