Commit e4abb5d4 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: x86: Emulate longer sample periods

Do as Power already does, emulate sample periods up to 2^63-1 by
composing them of smaller values limited by hardware capabilities.
Only once we wrap the software period do we generate an overflow
event.

Just 10 lines of new code.
Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 8a016db3
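To make the scheme concrete before the patch itself, here is a minimal user-space sketch of the same idea. All names here (MAX_HW_PERIOD, struct counter, set_period(), hw_interrupt()) are illustrative stand-ins for x86_pmu.max_period, struct hw_perf_counter, x86_perf_counter_set_period() and the PMU interrupt handlers, not the kernel code itself. The software period is consumed in hardware-sized chunks, and an overflow event is signalled only once the software period wraps:

#include <stdint.h>
#include <stdio.h>

/* Illustrative hardware limit; real x86 counters are narrower than 64 bit. */
#define MAX_HW_PERIOD ((INT64_C(1) << 31) - 1)

struct counter {
	int64_t sample_period;	/* requested software period, up to 2^63-1 */
	int64_t period_left;	/* remainder of the current software period */
};

/*
 * Program the next hardware period. Like the patched
 * x86_perf_counter_set_period(), it returns 1 only when the software
 * period wrapped, i.e. when a real overflow event is due.
 * (The "way outside a reasonable range" skip-forward case is omitted.)
 */
static int set_period(struct counter *c)
{
	int64_t left = c->period_left;
	int ret = 0;

	if (left <= 0) {			/* period exhausted: wrap it */
		left += c->sample_period;
		c->period_left = left;
		ret = 1;
	}

	if (left > MAX_HW_PERIOD)		/* clamp to hardware width */
		left = MAX_HW_PERIOD;

	/* the kernel would write -left into the counter register here */
	printf("  hw counter armed for %lld events\n", (long long)left);
	return ret;
}

/* Simulate the NMI path after the hardware counted 'counted' events. */
static void hw_interrupt(struct counter *c, int64_t counted)
{
	c->period_left -= counted;	/* x86_perf_counter_update() equivalent */
	if (!set_period(c))
		return;			/* partial chunk: stay silent */
	printf("  -> overflow event delivered\n");
}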
arch/x86/kernel/cpu/perf_counter.c
@@ -287,8 +287,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	if (!hwc->sample_period)
 		hwc->sample_period = x86_pmu.max_period;
 
-	atomic64_set(&hwc->period_left,
-		     min(x86_pmu.max_period, hwc->sample_period));
+	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
 	 * Raw event type provide the config in the event structure
@@ -451,13 +450,13 @@ static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
  * Set the next IRQ period, based on the hwc->period_left value.
  * To be called with the counter disabled in hw:
  */
-static void
+static int
 x86_perf_counter_set_period(struct perf_counter *counter,
 			     struct hw_perf_counter *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = min(x86_pmu.max_period, hwc->sample_period);
-	int err;
+	s64 period = hwc->sample_period;
+	int err, ret = 0;
 
 	/*
 	 * If we are way outside a reasoable range then just skip forward:
@@ -465,11 +464,13 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_set(&hwc->period_left, left);
+		ret = 1;
 	}
 	/*
 	 * Quirk: certain CPUs dont like it if just 1 event is left:
@@ -477,6 +478,9 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left < 2))
 		left = 2;
 
+	if (left > x86_pmu.max_period)
+		left = x86_pmu.max_period;
+
 	per_cpu(prev_left[idx], smp_processor_id()) = left;
 
 	/*
@@ -487,6 +491,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	err = checking_wrmsrl(hwc->counter_base + idx,
 			     (u64)(-left) & x86_pmu.counter_mask);
+
+	return ret;
 }
 
 static inline void
@@ -706,16 +712,19 @@ static void x86_pmu_disable(struct perf_counter *counter)
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
  */
-static void intel_pmu_save_and_restart(struct perf_counter *counter)
+static int intel_pmu_save_and_restart(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	int idx = hwc->idx;
+	int ret;
 
 	x86_perf_counter_update(counter, hwc, idx);
-	x86_perf_counter_set_period(counter, hwc, idx);
+	ret = x86_perf_counter_set_period(counter, hwc, idx);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		intel_pmu_enable_counter(hwc, idx);
+
+	return ret;
 }
 
 static void intel_pmu_reset(void)
@@ -782,7 +791,9 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
-		intel_pmu_save_and_restart(counter);
+		if (!intel_pmu_save_and_restart(counter))
+			continue;
+
 		if (perf_counter_overflow(counter, nmi, regs, 0))
 			intel_pmu_disable_counter(&counter->hw, bit);
 	}
@@ -824,9 +835,11 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 			continue;
 
 		/* counter overflow */
-		x86_perf_counter_set_period(counter, hwc, idx);
 		handled = 1;
 		inc_irq_stat(apic_perf_irqs);
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
 		if (perf_counter_overflow(counter, nmi, regs, 0))
 			amd_pmu_disable_counter(hwc, idx);
 	}
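Driving the sketch from above with a period larger than the assumed hardware width shows the chunking in action: two silent reprogramming steps, then one real overflow event once the full count has elapsed. This driver uses the same hypothetical names as the sketch:

int main(void)
{
	/* 5e9 > 2^31-1: needs three hardware chunks per software period */
	struct counter c = { .sample_period = INT64_C(5000000000) };

	c.period_left = c.sample_period;
	set_period(&c);				/* arm the first chunk */

	hw_interrupt(&c, MAX_HW_PERIOD);	/* chunk 1: no event */
	hw_interrupt(&c, MAX_HW_PERIOD);	/* chunk 2: no event */
	hw_interrupt(&c, c.period_left);	/* chunk 3: event fires */
	return 0;
}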