Commit 9f05210d authored by Yang Yingliang, committed by Xie XiuQi

arm64: perf: add pmu_nmi_enable to control pmu nmi support

hulk inclusion
category: feature
bugzilla: 12805
CVE: NA

-------------------------------------------------

The patchset "arm64: perf: add nmi support for pmu" changes the old
perf code paths, which carries some risk. Add a pmu_nmi_enable switch
to control which code path is used: when pmu_nmi_enable is disabled,
perf falls back to the old code. The default value is false. To enable
NMI support, add 'pmu_nmi_enable' to the kernel command line.
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 5bffbe21
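For context: the new switch is an ordinary early boot parameter, off by default, so the NMI paths stay dormant unless 'pmu_nmi_enable' is passed on the kernel command line. A minimal, self-contained sketch of that pattern follows; it restates the definition the patch adds in the arm_pmu driver hunk further down (the includes are added here only to make the fragment stand alone, as a reading aid rather than part of the patch):

#include <linux/init.h>
#include <linux/types.h>

/* Global switch, false by default; every new NMI code path is gated on it. */
bool pmu_nmi_enable;

/*
 * Passing "pmu_nmi_enable" on the kernel command line sets the flag before
 * the PMU driver probes; returning 1 marks the option as handled.
 */
static int __init pmu_nmi_enable_setup(char *str)
{
        pmu_nmi_enable = true;
        return 1;
}
__setup("pmu_nmi_enable", pmu_nmi_enable_setup);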
@@ -277,6 +277,7 @@ static void armv6pmu_enable_event(struct perf_event *event)
 	unsigned long val, mask, evt, flags;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
 	int idx = hwc->idx;
@@ -300,12 +301,18 @@ static void armv6pmu_enable_event(struct perf_event *event)
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(lock, flags);
+	if (pmu_nmi_enable)
+		raw_spin_lock_irqsave(lock, flags);
+	else
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(lock, flags);
+	if (pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(lock, flags);
+	else
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static irqreturn_t
@@ -369,25 +376,39 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
 static void armv6pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);

-	raw_spin_lock_irqsave(lock, flags);
+	if (pmu_nmi_enable)
+		raw_spin_lock_irqsave(lock, flags);
+	else
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(lock, flags);
+	if (pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(lock, flags);
+	else
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);

-	raw_spin_lock_irqsave(lock, flags);
+	if (pmu_nmi_enable)
+		raw_spin_lock_irqsave(lock, flags);
+	else
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(lock, flags);
+	if (pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(lock, flags);
+	else
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static int
......
@@ -751,7 +751,12 @@ static inline void armv7_pmsel_write(u32 counter)
 static inline void armv7_pmnc_select_counter(int idx)
 {
-	armv7_pmsel_write(ARMV7_IDX_TO_COUNTER(idx));
+	if (pmu_nmi_enable) {
+		armv7_pmsel_write(ARMV7_IDX_TO_COUNTER(idx));
+	} else {
+		u32 counter = ARMV7_IDX_TO_COUNTER(idx);
+
+		asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
+	}
 	isb();
 }
@@ -882,8 +887,10 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
 static void armv7pmu_enable_event(struct perf_event *event)
 {
+	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;

 	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -896,6 +903,8 @@ static void armv7pmu_enable_event(struct perf_event *event)
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
+	if (!pmu_nmi_enable)
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);

 	/*
 	 * Disable counter
@@ -919,12 +928,17 @@ static void armv7pmu_enable_event(struct perf_event *event)
 	 * Enable counter
 	 */
 	armv7_pmnc_enable_counter(idx);
+
+	if (!pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void armv7pmu_disable_event(struct perf_event *event)
 {
+	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;

 	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -933,6 +947,12 @@ static void armv7pmu_disable_event(struct perf_event *event)
 		return;
 	}

+	/*
+	 * Disable counter and interrupt
+	 */
+	if (!pmu_nmi_enable)
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
 	/*
 	 * Disable counter
 	 */
@@ -942,6 +962,9 @@ static void armv7pmu_disable_event(struct perf_event *event)
 	 * Disable interrupt for this counter
 	 */
 	armv7_pmnc_disable_intens(idx);
+
+	if (!pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
@@ -953,7 +976,8 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
 	u32 pmsel;
 	int idx;

-	pmsel = armv7_pmsel_read();
+	if (pmu_nmi_enable)
+		pmsel = armv7_pmsel_read();

 	/*
 	 * Get and reset the IRQ flags
@@ -1005,25 +1029,46 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
 	 */
 	irq_work_run();

-	armv7_pmsel_write(pmsel);
+	if (pmu_nmi_enable)
+		armv7_pmsel_write(pmsel);

 	return IRQ_HANDLED;
 }

 static void armv7pmu_start(struct arm_pmu *cpu_pmu)
 {
-	preempt_disable();
-	/* Enable all counters */
-	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	preempt_enable();
+	if (pmu_nmi_enable) {
+		preempt_disable();
+		/* Enable all counters */
+		armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
+		preempt_enable();
+	} else {
+		unsigned long flags;
+		struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);
+		/* Enable all counters */
+		armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	}
 }

 static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
 {
-	preempt_disable();
-	/* Disable all counters */
-	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	preempt_enable();
+	if (pmu_nmi_enable) {
+		preempt_disable();
+		/* Disable all counters */
+		armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
+		preempt_enable();
+	} else {
+		unsigned long flags;
+		struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);
+		/* Disable all counters */
+		armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	}
 }

 static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
@@ -1489,8 +1534,15 @@ static void krait_clearpmu(u32 config_base)
 static void krait_pmu_disable_event(struct perf_event *event)
 {
+	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/* Disable counter and interrupt */
+	if (!pmu_nmi_enable)
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);

 	/* Disable counter */
 	armv7_pmnc_disable_counter(idx);
@@ -1503,17 +1555,25 @@ static void krait_pmu_disable_event(struct perf_event *event)
 	/* Disable interrupt for this counter */
 	armv7_pmnc_disable_intens(idx);
+
+	if (!pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void krait_pmu_enable_event(struct perf_event *event)
 {
+	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
+	if (!pmu_nmi_enable)
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);

 	/* Disable counter */
 	armv7_pmnc_disable_counter(idx);
@@ -1533,6 +1593,9 @@ static void krait_pmu_enable_event(struct perf_event *event)
 	/* Enable counter */
 	armv7_pmnc_enable_counter(idx);
+
+	if (!pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void krait_pmu_reset(void *info)
@@ -1808,8 +1871,15 @@ static void scorpion_clearpmu(u32 config_base)
 static void scorpion_pmu_disable_event(struct perf_event *event)
 {
+	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/* Disable counter and interrupt */
+	if (!pmu_nmi_enable)
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);

 	/* Disable counter */
 	armv7_pmnc_disable_counter(idx);
@@ -1822,17 +1892,25 @@ static void scorpion_pmu_disable_event(struct perf_event *event)
 	/* Disable interrupt for this counter */
 	armv7_pmnc_disable_intens(idx);
+
+	if (!pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void scorpion_pmu_enable_event(struct perf_event *event)
 {
+	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
+	if (!pmu_nmi_enable)
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);

 	/* Disable counter */
 	armv7_pmnc_disable_counter(idx);
@@ -1852,6 +1930,9 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
 	/* Enable counter */
 	armv7_pmnc_enable_counter(idx);
+
+	if (!pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void scorpion_pmu_reset(void *info)
......
@@ -597,9 +597,20 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 }

+static inline void armv8pmu_select_counter(int idx)
+{
+	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
+	write_sysreg(counter, pmselr_el0);
+	isb();
+}
+
 static inline u32 armv8pmu_read_evcntr(int idx)
 {
-	return read_pmevcntrn(idx);
+	if (pmu_nmi_enable)
+		return read_pmevcntrn(idx);
+
+	armv8pmu_select_counter(idx);
+	return read_sysreg(pmxevcntr_el0);
 }

 static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
@@ -633,7 +644,12 @@ static inline u64 armv8pmu_read_counter(struct perf_event *event)
 static inline void armv8pmu_write_evcntr(int idx, u32 value)
 {
-	write_pmevcntrn(idx, value);
+	if (pmu_nmi_enable) {
+		write_pmevcntrn(idx, value);
+	} else {
+		armv8pmu_select_counter(idx);
+		write_sysreg(value, pmxevcntr_el0);
+	}
 }

 static inline void armv8pmu_write_hw_counter(struct perf_event *event,
@@ -674,8 +690,14 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
 static inline void armv8pmu_write_evtype(int idx, u32 val)
 {
-	val &= ARMV8_PMU_EVTYPE_MASK;
-	write_pmevtypern(idx, val);
+	if (pmu_nmi_enable) {
+		val &= ARMV8_PMU_EVTYPE_MASK;
+		write_pmevtypern(idx, val);
+	} else {
+		armv8pmu_select_counter(idx);
+		val &= ARMV8_PMU_EVTYPE_MASK;
+		write_sysreg(val, pmxevtyper_el0);
+	}
 }

 static inline void armv8pmu_write_event_type(struct perf_event *event)
@@ -778,10 +800,16 @@ static inline u32 armv8pmu_getreset_flags(void)
 static void armv8pmu_enable_event(struct perf_event *event)
 {
+	unsigned long flags = 0;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
+	if (!pmu_nmi_enable)
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);

 	/*
 	 * Disable counter
@@ -802,10 +830,23 @@ static void armv8pmu_enable_event(struct perf_event *event)
 	 * Enable counter
 	 */
 	armv8pmu_enable_event_counter(event);
+
+	if (!pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void armv8pmu_disable_event(struct perf_event *event)
 {
+	unsigned long flags = 0;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/*
+	 * Disable counter and interrupt
+	 */
+	if (!pmu_nmi_enable)
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
 	/*
 	 * Disable counter
 	 */
@@ -815,22 +856,45 @@ static void armv8pmu_disable_event(struct perf_event *event)
 	 * Disable interrupt for this counter
 	 */
 	armv8pmu_disable_event_irq(event);
+
+	if (!pmu_nmi_enable)
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void armv8pmu_start(struct arm_pmu *cpu_pmu)
 {
-	preempt_disable();
-	/* Enable all counters */
-	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
-	preempt_enable();
+	if (pmu_nmi_enable) {
+		preempt_disable();
+		/* Enable all counters */
+		armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+		preempt_enable();
+	} else {
+		unsigned long flags;
+		struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);
+		/* Enable all counters */
+		armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	}
 }

 static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
 {
-	preempt_disable();
-	/* Disable all counters */
-	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
-	preempt_enable();
+	if (pmu_nmi_enable) {
+		preempt_disable();
+		/* Disable all counters */
+		armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
+		preempt_enable();
+	} else {
+		unsigned long flags;
+		struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+		raw_spin_lock_irqsave(&events->pmu_lock, flags);
+		/* Disable all counters */
+		armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
+		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	}
 }

 static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
@@ -895,7 +959,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
 	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
-	if (!in_nmi())
+	if (!pmu_nmi_enable || !in_nmi())
 		irq_work_run();

 	return IRQ_HANDLED;
......
@@ -43,6 +43,7 @@
 #include <linux/of.h>
 #include <linux/irq_work.h>
 #include <linux/kexec.h>
+#include <linux/perf/arm_pmu.h>

 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -1120,6 +1121,9 @@ __setup("hardlockup_cpu_freq=", hardlockup_cpu_freq_setup);
 u64 hw_nmi_get_sample_period(int watchdog_thresh)
 {
-	return hardlockup_cpu_freq * 1000 * watchdog_thresh;
+	if (!pmu_nmi_enable)
+		return 0;
+	else
+		return hardlockup_cpu_freq * 1000 * watchdog_thresh;
 }
 #endif
@@ -44,6 +44,16 @@ static const struct pmu_irq_ops pmuirq_ops = {
 	.free_pmuirq = armpmu_free_pmuirq
 };

+bool pmu_nmi_enable;
+
+static int __init pmu_nmi_enable_setup(char *str)
+{
+	pmu_nmi_enable = true;
+
+	return 1;
+}
+__setup("pmu_nmi_enable", pmu_nmi_enable_setup);
+
 static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
 {
 	free_nmi(irq, per_cpu_ptr(devid, cpu));
@@ -645,10 +655,19 @@ void armpmu_free_irq(int irq, int cpu)
 	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
 		return;

-	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
+	if (pmu_nmi_enable) {
+		per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);

-	per_cpu(cpu_irq, cpu) = 0;
-	per_cpu(cpu_irq_ops, cpu) = NULL;
+		per_cpu(cpu_irq, cpu) = 0;
+		per_cpu(cpu_irq_ops, cpu) = NULL;
+	} else {
+		if (!irq_is_percpu_devid(irq))
+			free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
+		else if (armpmu_count_irq_users(irq) == 1)
+			free_percpu_irq(irq, &cpu_armpmu);
+
+		per_cpu(cpu_irq, cpu) = 0;
+	}
 }

 int armpmu_request_irq(int irq, int cpu)
@@ -660,58 +679,88 @@ int armpmu_request_irq(int irq, int cpu)
 	if (!irq)
 		return 0;

+	if (pmu_nmi_enable) {
 		if (!irq_is_percpu_devid(irq)) {
 			unsigned long irq_flags;

 			err = irq_force_affinity(irq, cpumask_of(cpu));

 			if (err && num_possible_cpus() > 1) {
 				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
 					irq, cpu);
 				goto err_out;
 			}

 			irq_flags = IRQF_PERCPU |
 				    IRQF_NOBALANCING |
 				    IRQF_NO_THREAD;

 			irq_set_status_flags(irq, IRQ_NOAUTOEN);

 			err = request_nmi(irq, handler, irq_flags, "arm-pmu",
 					  per_cpu_ptr(&cpu_armpmu, cpu));

 			/* If cannot get an NMI, get a normal interrupt */
 			if (err) {
 				err = request_irq(irq, handler, irq_flags, "arm-pmu",
 						  per_cpu_ptr(&cpu_armpmu, cpu));
 				irq_ops = &pmuirq_ops;
 			} else {
 				irq_ops = &pmunmi_ops;
 			}
 		} else if (armpmu_count_irq_users(irq) == 0) {
 			err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);

 			/* If cannot get an NMI, get a normal interrupt */
 			if (err) {
 				err = request_percpu_irq(irq, handler, "arm-pmu",
 							 &cpu_armpmu);
 				irq_ops = &percpu_pmuirq_ops;
 			} else {
 				irq_ops = &percpu_pmunmi_ops;
 			}
 		} else {
 			/* Per cpudevid irq was already requested by another CPU */
 			irq_ops = armpmu_find_irq_ops(irq);

 			if (WARN_ON(!irq_ops))
 				err = -EINVAL;
 		}

 		if (err)
 			goto err_out;

 		per_cpu(cpu_irq, cpu) = irq;
 		per_cpu(cpu_irq_ops, cpu) = irq_ops;
+	} else {
+		if (!irq_is_percpu_devid(irq)) {
+			unsigned long irq_flags;
+
+			err = irq_force_affinity(irq, cpumask_of(cpu));
+			if (err && num_possible_cpus() > 1) {
+				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					irq, cpu);
+				goto err_out;
+			}
+
+			irq_flags = IRQF_PERCPU |
+				    IRQF_NOBALANCING |
+				    IRQF_NO_THREAD;
+
+			irq_set_status_flags(irq, IRQ_NOAUTOEN);
+			err = request_irq(irq, handler, irq_flags, "arm-pmu",
+					  per_cpu_ptr(&cpu_armpmu, cpu));
+		} else if (armpmu_count_irq_users(irq) == 0) {
+			err = request_percpu_irq(irq, handler, "arm-pmu",
+						 &cpu_armpmu);
+		}
+
+		if (err)
+			goto err_out;
+
+		per_cpu(cpu_irq, cpu) = irq;
+	}

 	return 0;

 err_out:
@@ -744,8 +793,16 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 	per_cpu(cpu_armpmu, cpu) = pmu;

 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq)
-		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
+	if (irq) {
+		if (pmu_nmi_enable) {
+			per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
+		} else {
+			if (irq_is_percpu_devid(irq))
+				enable_percpu_irq(irq, IRQ_TYPE_NONE);
+			else
+				enable_irq(irq);
+		}
+	}

 	return 0;
 }
@@ -759,8 +816,16 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
 		return 0;

 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq)
-		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
+	if (irq) {
+		if (pmu_nmi_enable) {
+			per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
+		} else {
+			if (irq_is_percpu_devid(irq))
+				disable_percpu_irq(irq);
+			else
+				disable_irq_nosync(irq);
+		}
+	}

 	per_cpu(cpu_armpmu, cpu) = NULL;
@@ -934,6 +999,8 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 		struct pmu_hw_events *events;

 		events = per_cpu_ptr(pmu->hw_events, cpu);
+		if (!pmu_nmi_enable)
+			raw_spin_lock_init(&events->pmu_lock);
 		events->percpu_pmu = pmu;
 	}
......
@@ -58,6 +58,11 @@ struct pmu_hw_events {
 	 */
 	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t pmu_lock;
+
 	/*
 	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
@@ -164,6 +169,8 @@ int armpmu_register(struct arm_pmu *pmu);
 int armpmu_request_irq(int irq, int cpu);
 void armpmu_free_irq(int irq, int cpu);

+extern bool pmu_nmi_enable;
+
 #define ARMV8_PMU_PDEV_NAME "armv8-pmu"

 #endif /* CONFIG_ARM_PMU */
......
@@ -19,6 +19,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
@@ -284,10 +285,15 @@ static inline struct kvm_vcpu *kvm_pmu_to_vcpu(struct kvm_pmu *pmu)
 static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu;
+	struct kvm_vcpu_arch *vcpu_arch;

 	pmc -= pmc->idx;
 	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
-	return kvm_pmu_to_vcpu(pmu);
+	if (pmu_nmi_enable)
+		return kvm_pmu_to_vcpu(pmu);
+
+	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
+	return container_of(vcpu_arch, struct kvm_vcpu, arch);
 }

 /**
@@ -322,7 +328,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 	if (kvm_pmu_overflow_status(vcpu)) {
 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

-		if (!in_nmi())
+		if (!pmu_nmi_enable || !in_nmi())
 			kvm_vcpu_kick(vcpu);
 		else
 			irq_work_queue(&vcpu->arch.pmu.overflow_work);
@@ -527,8 +533,9 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
 		return ret;
 	}

-	init_irq_work(&vcpu->arch.pmu.overflow_work,
-		      kvm_pmu_perf_overflow_notify_vcpu);
+	if (pmu_nmi_enable)
+		init_irq_work(&vcpu->arch.pmu.overflow_work,
+			      kvm_pmu_perf_overflow_notify_vcpu);

 	vcpu->arch.pmu.created = true;
 	return 0;
......