Commit 2f13ff1d authored by Marc Zyngier

irqchip/gic-v3-its: Track LPI distribution on a per CPU basis

In order to improve the distribution of LPIs among CPUs, let's start by
tracking the number of LPIs assigned to CPUs, both for managed and
non-managed interrupts (as separate counters).
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/20200515165752.121296-2-maz@kernel.org
Parent 337cbeb2
@@ -174,6 +174,13 @@ static struct {
 	int			next_victim;
 } vpe_proxy;
 
+struct cpu_lpi_count {
+	atomic_t	managed;
+	atomic_t	unmanaged;
+};
+
+static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
+
 static LIST_HEAD(its_nodes);
 static DEFINE_RAW_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
@@ -1510,6 +1517,30 @@ static void its_unmask_irq(struct irq_data *d)
 	lpi_update_config(d, 0, LPI_PROP_ENABLED);
 }
 
+static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+
+	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_inc_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+	else
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_dec_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+	else
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
 {
@@ -1518,34 +1549,44 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_collection *target_col;
 	u32 id = its_get_event_id(d);
+	int prev_cpu;
 
 	/* A forwarded interrupt should use irq_set_vcpu_affinity */
 	if (irqd_is_forwarded_to_vcpu(d))
 		return -EINVAL;
 
+	prev_cpu = its_dev->event_map.col_map[id];
+	its_dec_lpi_count(d, prev_cpu);
+
 	/* lpi cannot be routed to a redistributor that is on a foreign node */
 	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
 		if (its_dev->its->numa_node >= 0) {
 			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 			if (!cpumask_intersects(mask_val, cpu_mask))
-				return -EINVAL;
+				goto err;
 		}
 	}
 
 	cpu = cpumask_any_and(mask_val, cpu_mask);
 	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
+		goto err;
 
 	/* don't set the affinity when the target cpu is same as current one */
-	if (cpu != its_dev->event_map.col_map[id]) {
+	if (cpu != prev_cpu) {
 		target_col = &its_dev->its->collections[cpu];
 		its_send_movi(its_dev, target_col, id);
 		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
 	}
 
+	its_inc_lpi_count(d, cpu);
+
 	return IRQ_SET_MASK_OK_DONE;
+
+err:
+	its_inc_lpi_count(d, prev_cpu);
+	return -EINVAL;
 }
 
 static u64 its_irq_get_msi_base(struct its_device *its_dev)
@@ -3448,6 +3489,7 @@ static int its_irq_domain_activate(struct irq_domain *domain,
 		cpu = cpumask_first(cpu_online_mask);
 	}
 
+	its_inc_lpi_count(d, cpu);
 	its_dev->event_map.col_map[event] = cpu;
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -3462,6 +3504,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain,
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
 
+	its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
 	/* Stop the delivery of interrupts */
 	its_send_discard(its_dev, event);
 }
...
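
To see why these counters are worth maintaining, here is a minimal user-space C sketch of the same idea: one managed/unmanaged counter pair per CPU, plus a helper that returns the least-loaded CPU, which is roughly what later code in this series can do via its_read_lpi_count(). This is only an illustration using C11 atomics in place of the kernel's per-CPU atomic_t counters; the names NR_CPUS_DEMO, lpi_count and pick_least_loaded are made up for the example and are not part of the patch.

/* Illustrative user-space sketch only; names and sizes are hypothetical. */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_DEMO 4			/* hypothetical CPU count for the demo */

struct cpu_lpi_count_demo {
	atomic_int managed;		/* interrupts with managed affinity */
	atomic_int unmanaged;		/* everything else */
};

static struct cpu_lpi_count_demo lpi_count[NR_CPUS_DEMO];

static void inc_lpi_count(int cpu, bool managed)
{
	if (managed)
		atomic_fetch_add(&lpi_count[cpu].managed, 1);
	else
		atomic_fetch_add(&lpi_count[cpu].unmanaged, 1);
}

/* Pick the CPU currently carrying the fewest interrupts of the given kind. */
static int pick_least_loaded(bool managed)
{
	int cpu, best = 0, best_count = INT_MAX;

	for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
		int count = managed ? atomic_load(&lpi_count[cpu].managed)
				    : atomic_load(&lpi_count[cpu].unmanaged);

		if (count < best_count) {
			best_count = count;
			best = cpu;
		}
	}
	return best;
}

int main(void)
{
	/* CPU 0 already has two unmanaged interrupts, CPU 1 has one. */
	inc_lpi_count(0, false);
	inc_lpi_count(0, false);
	inc_lpi_count(1, false);

	/* Prints 2: the first CPU with no interrupts assigned yet. */
	printf("least loaded CPU for an unmanaged LPI: %d\n",
	       pick_least_loaded(false));
	return 0;
}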