Commit d295917a authored by Linus Torvalds

Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
 "The irq department provides:

   - two fixes for the CPU affinity spread infrastructure to prevent
     unbalanced spreading in corner cases which leads to horrible
     performance, because interrupts are rather aggregated than spread

   - add a missing spinlock initializer in the imx-gpcv2 init code"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip/irq-imx-gpcv2: Fix spinlock initialization
  irq/affinity: Fix extra vecs calculation
  irq/affinity: Fix CPU spread for unbalanced nodes
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
return -ENOMEM; return -ENOMEM;
} }
raw_spin_lock_init(&cd->rlock);
cd->gpc_base = of_iomap(node, 0); cd->gpc_base = of_iomap(node, 0);
if (!cd->gpc_base) { if (!cd->gpc_base) {
pr_err("fsl-gpcv2: unable to map gpc registers\n"); pr_err("fsl-gpcv2: unable to map gpc registers\n");
......
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
struct cpumask * struct cpumask *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{ {
int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec; int n, nodes, cpus_per_vec, extra_vecs, curvec;
int affv = nvecs - affd->pre_vectors - affd->post_vectors; int affv = nvecs - affd->pre_vectors - affd->post_vectors;
int last_affv = affv + affd->pre_vectors; int last_affv = affv + affd->pre_vectors;
nodemask_t nodemsk = NODE_MASK_NONE; nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
goto done; goto done;
} }
/* Spread the vectors per node */
vecs_per_node = affv / nodes;
/* Account for rounding errors */
extra_vecs = affv - (nodes * vecs_per_node);
for_each_node_mask(n, nodemsk) { for_each_node_mask(n, nodemsk) {
int ncpus, v, vecs_to_assign = vecs_per_node; int ncpus, v, vecs_to_assign, vecs_per_node;
/* Spread the vectors per node */
vecs_per_node = (affv - curvec) / nodes;
/* Get the cpus on this node which are in the mask */ /* Get the cpus on this node which are in the mask */
cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n)); cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
/* Calculate the number of cpus per vector */ /* Calculate the number of cpus per vector */
ncpus = cpumask_weight(nmsk); ncpus = cpumask_weight(nmsk);
vecs_to_assign = min(vecs_per_node, ncpus);
/* Account for rounding errors */
extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
for (v = 0; curvec < last_affv && v < vecs_to_assign; for (v = 0; curvec < last_affv && v < vecs_to_assign;
curvec++, v++) { curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
/* Account for extra vectors to compensate rounding errors */ /* Account for extra vectors to compensate rounding errors */
if (extra_vecs) { if (extra_vecs) {
cpus_per_vec++; cpus_per_vec++;
if (!--extra_vecs) --extra_vecs;
vecs_per_node++;
} }
irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec); irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
} }
if (curvec >= last_affv) if (curvec >= last_affv)
break; break;
--nodes;
} }
done: done:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册