提交 37125688 编写于 作者: J Julien Thierry 提交者: Xie XiuQi

arm_pmu: Use NMIs for PMU

hulk inclusion
category: feature
bugzilla: 12804
CVE: NA

-------------------------------------------------

Add required PMU interrupt operations for NMIs. Request interrupt lines as
NMIs when possible, otherwise fall back to normal interrupts.
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 effcfafa
@@ -44,6 +44,17 @@ static const struct pmu_irq_ops pmuirq_ops = {
	.free_pmuirq = armpmu_free_pmuirq
};
/* Release a PMU interrupt line that was requested as a per-CPU-devid NMI. */
static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
{
	void *dev_id = per_cpu_ptr(devid, cpu);

	free_nmi(irq, dev_id);
}
/*
 * Ops used when the (non-percpu) PMU interrupt was successfully requested
 * as an NMI via request_nmi(); mirrors pmuirq_ops for the NMI case.
 */
static const struct pmu_irq_ops pmunmi_ops = {
	.enable_pmuirq = enable_nmi,
	.disable_pmuirq = disable_nmi_nosync,
	.free_pmuirq = armpmu_free_pmunmi
};
static void armpmu_enable_percpu_pmuirq(unsigned int irq)
{
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
@@ -62,6 +73,31 @@ static const struct pmu_irq_ops percpu_pmuirq_ops = {
	.free_pmuirq = armpmu_free_percpu_pmuirq
};
/*
 * Enable the per-CPU PMU NMI on the calling CPU. The line is only enabled
 * if prepare_percpu_nmi() succeeds; on failure the NMI stays disabled.
 */
static void armpmu_enable_percpu_pmunmi(unsigned int irq)
{
	if (prepare_percpu_nmi(irq))
		return;

	enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}
/*
 * Disable the per-CPU PMU NMI on the calling CPU.
 *
 * teardown_percpu_nmi() undoes the prepare_percpu_nmi() done in the enable
 * path; the order (disable first, then teardown) is significant.
 */
static void armpmu_disable_percpu_pmunmi(unsigned int irq)
{
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}
/*
 * Drop one user of a shared per-CPU PMU NMI; the line itself is only
 * freed when the last user (per armpmu_count_irq_users()) goes away.
 */
static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) != 1)
		return;

	free_percpu_nmi(irq, devid);
}
/*
 * Ops used when the per-CPU PMU interrupt was successfully requested as an
 * NMI via request_percpu_nmi(); mirrors percpu_pmuirq_ops for the NMI case.
 */
static const struct pmu_irq_ops percpu_pmunmi_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmunmi,
	.disable_pmuirq = armpmu_disable_percpu_pmunmi,
	.free_pmuirq = armpmu_free_percpu_pmunmi
};
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
@@ -640,15 +676,29 @@ int armpmu_request_irq(int irq, int cpu)
			    IRQF_NO_THREAD;

		irq_set_status_flags(irq, IRQ_NOAUTOEN);
		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_irq(irq, handler, irq_flags, "arm-pmu",
					  per_cpu_ptr(&cpu_armpmu, cpu));
			irq_ops = &pmuirq_ops;
		} else {
			irq_ops = &pmunmi_ops;
		}
	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_percpu_irq(irq, handler, "arm-pmu",
						 &cpu_armpmu);
			irq_ops = &percpu_pmuirq_ops;
		} else {
			irq_ops = &percpu_pmunmi_ops;
		}
	} else {
		/* Per cpudevid irq was already requested by another CPU */
		irq_ops = armpmu_find_irq_ops(irq);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册