提交 2da33146 编写于 作者: K Kan Liang 提交者: Ingo Molnar

perf/x86/intel/uncore: Introduce customized event_read() for client IMC uncore

There are two free-running counters for client IMC uncore. The
customized event_init() function hard codes their index to
'UNCORE_PMC_IDX_FIXED' and 'UNCORE_PMC_IDX_FIXED + 1'.
To support the index 'UNCORE_PMC_IDX_FIXED + 1', the generic
uncore_perf_event_update is obscurely hacked.
The code quality issue will bring problems when a new counter index is
introduced into the generic code, for example, a new index for
free-running counter.

Introduce a customized event_read() function for client IMC uncore.
The customized function is copied from previous generic
uncore_pmu_event_read().
The index 'UNCORE_PMC_IDX_FIXED + 1' will be isolated for client IMC
uncore only.
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: acme@kernel.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1525371913-10597-1-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 c52b5c5f
...@@ -450,6 +450,35 @@ static void snb_uncore_imc_event_start(struct perf_event *event, int flags) ...@@ -450,6 +450,35 @@ static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
uncore_pmu_start_hrtimer(box); uncore_pmu_start_hrtimer(box);
} }
/*
 * Read and accumulate the current value of a client IMC free-running
 * counter into event->count, racing safely against the uncore hrtimer.
 */
static void snb_uncore_imc_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 prev, curr, delta;
	int shift;

	/*
	 * Client IMC has two free-running counters; the second one is
	 * hard-coded to index UNCORE_PMC_IDX_FIXED + 1, so any index at
	 * or above UNCORE_PMC_IDX_FIXED uses the fixed-counter width.
	 */
	shift = (event->hw.idx >= UNCORE_PMC_IDX_FIXED) ?
		64 - uncore_fixed_ctr_bits(box) :
		64 - uncore_perf_ctr_bits(box);

	/*
	 * The hrtimer may concurrently update prev_count; retry until we
	 * swap in the new reading against the prev value we observed.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		curr = uncore_read_counter(box, event);
	} while (local64_xchg(&event->hw.prev_count, curr) != prev);

	/*
	 * Shift up and back down to confine the difference to the
	 * counter's actual bit width (handles counter wraparound).
	 */
	delta = (curr << shift) - (prev << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{ {
struct intel_uncore_box *box = uncore_event_to_box(event); struct intel_uncore_box *box = uncore_event_to_box(event);
...@@ -472,7 +501,7 @@ static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) ...@@ -472,7 +501,7 @@ static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
* Drain the remaining delta count out of a event * Drain the remaining delta count out of a event
* that we are disabling: * that we are disabling:
*/ */
uncore_perf_event_update(box, event); snb_uncore_imc_event_read(event);
hwc->state |= PERF_HES_UPTODATE; hwc->state |= PERF_HES_UPTODATE;
} }
} }
...@@ -534,7 +563,7 @@ static struct pmu snb_uncore_imc_pmu = { ...@@ -534,7 +563,7 @@ static struct pmu snb_uncore_imc_pmu = {
.del = snb_uncore_imc_event_del, .del = snb_uncore_imc_event_del,
.start = snb_uncore_imc_event_start, .start = snb_uncore_imc_event_start,
.stop = snb_uncore_imc_event_stop, .stop = snb_uncore_imc_event_stop,
.read = uncore_pmu_event_read, .read = snb_uncore_imc_event_read,
}; };
static struct intel_uncore_ops snb_uncore_imc_ops = { static struct intel_uncore_ops snb_uncore_imc_ops = {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册