diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 1ae99deeec5491ebddfd212acd8de384e8969d0f..4106a03af182bc9c26b87785c9d7137defd8e3f0 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -68,6 +68,12 @@ enum armv6_counters {
 	ARMV6_COUNTER1,
 };
 
+/*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+DEFINE_PER_CPU(raw_spinlock_t, pmu_lock);
+
 /*
  * The hardware events that we support. We do support cache operations but
  * we have harvard caches and no way to combine instruction and data
@@ -271,7 +277,7 @@ static void armv6pmu_enable_event(struct perf_event *event)
 	unsigned long val, mask, evt, flags;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
 	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
@@ -294,12 +300,12 @@ static void armv6pmu_enable_event(struct perf_event *event)
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static irqreturn_t
@@ -363,25 +369,25 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
 static void armv6pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static int
@@ -502,6 +508,8 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->stop		= armv6pmu_stop;
 	cpu_pmu->map_event	= armv6_map_event;
 	cpu_pmu->num_events	= 3;
+
+	raw_spin_lock_init(this_cpu_ptr(&pmu_lock));
 }
 
 static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d0b7dd8fb184b041446707dceccda87588ba45a2..79194a5b4bdaabbbcdabff42c3e2cf1386d277fb 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -830,7 +830,6 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 		struct pmu_hw_events *events;
 
 		events = per_cpu_ptr(pmu->hw_events, cpu);
-		raw_spin_lock_init(&events->pmu_lock);
 		events->percpu_pmu = pmu;
 	}
 
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index bf309ff6f2444f2cd516c54b9d8eec5af525cf5b..674f3b1af3b69f8c598c5e8291a0740e5a1b44d1 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -58,11 +58,6 @@ struct pmu_hw_events {
 	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
 
-	/*
-	 * Hardware lock to serialize accesses to PMU registers. Needed for the
-	 * read/modify/write sequences.
-	 */
-	raw_spinlock_t		pmu_lock;
 
 	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we