Commit 3ef18ed9 authored by Julien Thierry, committed by Xie XiuQi

perf/arm_pmu: Move PMU lock to ARMv6 events

hulk inclusion
category: feature
bugzilla: 12804
CVE: NA

-------------------------------------------------

Perf event backend for ARMv8 and ARMv7 no longer uses the pmu_lock.
The only remaining user is the ARMv6 event backend.

Move the pmu_lock out of the generic arm_pmu driver into the ARMv6 code.
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 1b2b53da
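For context, the pattern this patch moves into the ARMv6 backend is a per-CPU raw spinlock guarding read/modify/write sequences on the PMU control register. Below is a minimal sketch of that pattern under the patch's assumptions: armv6_pmcr_update() is a hypothetical helper used only for illustration, while armv6_pmcr_read()/armv6_pmcr_write() are the existing accessors visible in the diff, and each CPU's lock is assumed to have been set up with raw_spin_lock_init() as armv6pmu_init() does below.

#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Per-CPU lock, private to the ARMv6 backend (mirrors the DEFINE_PER_CPU in the diff). */
DEFINE_PER_CPU(raw_spinlock_t, pmu_lock);

/*
 * Hypothetical helper (illustration only): serialize a read/modify/write
 * of the ARMv6 PMU control register using the per-CPU lock instead of the
 * old events->pmu_lock field.
 */
static void armv6_pmcr_update(unsigned long set, unsigned long clear)
{
	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
	unsigned long flags, val;

	raw_spin_lock_irqsave(lock, flags);
	val = armv6_pmcr_read();	/* existing accessor in the ARMv6 perf code */
	val &= ~clear;
	val |= set;
	armv6_pmcr_write(val);		/* existing accessor in the ARMv6 perf code */
	raw_spin_unlock_irqrestore(lock, flags);
}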
@@ -68,6 +68,12 @@ enum armv6_counters {
 	ARMV6_COUNTER1,
 };
 
+/*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+DEFINE_PER_CPU(raw_spinlock_t, pmu_lock);
+
 /*
  * The hardware events that we support. We do support cache operations but
  * we have harvard caches and no way to combine instruction and data
@@ -271,7 +277,7 @@ static void armv6pmu_enable_event(struct perf_event *event)
 	unsigned long val, mask, evt, flags;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
 	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
@@ -294,12 +300,12 @@ static void armv6pmu_enable_event(struct perf_event *event)
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static irqreturn_t
@@ -363,25 +369,25 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
 static void armv6pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static int
@@ -502,6 +508,8 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->stop		= armv6pmu_stop;
 	cpu_pmu->map_event	= armv6_map_event;
 	cpu_pmu->num_events	= 3;
+
+	raw_spin_lock_init(this_cpu_ptr(&pmu_lock));
 }
 
 static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
@@ -830,7 +830,6 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 		struct pmu_hw_events *events;
 
 		events = per_cpu_ptr(pmu->hw_events, cpu);
-		raw_spin_lock_init(&events->pmu_lock);
 		events->percpu_pmu = pmu;
 	}
@@ -58,11 +58,6 @@ struct pmu_hw_events {
 	 */
 	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
 
-	/*
-	 * Hardware lock to serialize accesses to PMU registers. Needed for the
-	 * read/modify/write sequences.
-	 */
-	raw_spinlock_t		pmu_lock;
 
 	/*
 	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we