Commit c8aab2e0 authored by Stephane Eranian, committed by Ingo Molnar

perf/x86: Clean up __intel_pmu_pebs_event() code

This patch makes the code more readable. It also renames
precise_store_data_hsw() to precise_datala_hsw() because
the function is called for both loads and stores on HSW.
The patch also gets rid of the hardcoded store event
codes in that same function.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1407785233-32193-5-git-send-email-eranian@google.com
Cc: ak@linux.intel.com
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 770eee1f
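
For reference, the hardcoded values removed by this patch are the raw (umask << 8 | event) codes of the Haswell MEM_UOPS_RETIRED store sub-events listed in the function's own comment; the old code compared them against event->hw.config & INTEL_ARCH_EVENT_MASK. Below is a minimal stand-alone sketch of that old check; the helper name is made up for illustration and the 0xffff mask simply mirrors INTEL_ARCH_EVENT_MASK (event select in bits 0-7, umask in bits 8-15).

#include <stdbool.h>
#include <stdint.h>

/* Illustration only, not part of the patch: the set of raw Haswell event
 * codes that the old precise_store_data_hsw() accepted before filling in
 * L1 hit/miss information. */
static bool hsw_precise_store_event(uint64_t config)
{
        switch (config & 0xffff) {
        case 0x12d0: /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
        case 0x22d0: /* MEM_UOPS_RETIRED.LOCK_STORES */
        case 0x42d0: /* MEM_UOPS_RETIRED.SPLIT_STORES */
        case 0x82d0: /* MEM_UOPS_RETIRED.ALL_STORES */
                return true;
        default:
                return false;
        }
}

After this patch the same set of store events is recognized through the PERF_X86_EVENT_PEBS_ST_HSW flag attached to the event up front, so the PEBS drain path no longer needs to compare raw event codes.
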
@@ -108,10 +108,9 @@ static u64 precise_store_data(u64 status)
         return val;
 }

-static u64 precise_store_data_hsw(struct perf_event *event, u64 status)
+static u64 precise_datala_hsw(struct perf_event *event, u64 status)
 {
         union perf_mem_data_src dse;
-        u64 cfg = event->hw.config & INTEL_ARCH_EVENT_MASK;

         dse.val = PERF_MEM_NA;

@@ -128,15 +127,12 @@ static u64 precise_store_data_hsw(struct perf_event *event, u64 status)
          * MEM_UOPS_RETIRED.SPLIT_STORES
          * MEM_UOPS_RETIRED.ALL_STORES
          */
-        if (cfg != 0x12d0 && cfg != 0x22d0 && cfg != 0x42d0 && cfg != 0x82d0)
-                return dse.val;
-
-        if (status & 1)
-                dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
-        else
-                dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
-
-        /* Nothing else supported. Sorry. */
+        if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
+                if (status & 1)
+                        dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+                else
+                        dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
+        }
         return dse.val;
 }

@@ -825,6 +821,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
 static void __intel_pmu_pebs_event(struct perf_event *event,
                                    struct pt_regs *iregs, void *__pebs)
 {
+#define PERF_X86_EVENT_PEBS_HSW_PREC \
+                (PERF_X86_EVENT_PEBS_ST_HSW | \
+                 PERF_X86_EVENT_PEBS_LD_HSW | \
+                 PERF_X86_EVENT_PEBS_NA_HSW)
         /*
          * We cast to the biggest pebs_record but are careful not to
          * unconditionally access the 'extra' entries.
@@ -834,47 +834,40 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
         struct perf_sample_data data;
         struct pt_regs regs;
         u64 sample_type;
-        int fll, fst;
+        int fll, fst, dsrc;
+        int fl = event->hw.flags;

         if (!intel_pmu_save_and_restart(event))
                 return;

-        fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
-        fst = event->hw.flags & (PERF_X86_EVENT_PEBS_ST |
-                                 PERF_X86_EVENT_PEBS_ST_HSW |
-                                 PERF_X86_EVENT_PEBS_LD_HSW |
-                                 PERF_X86_EVENT_PEBS_NA_HSW);
+        sample_type = event->attr.sample_type;
+        dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
+
+        fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
+        fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

         perf_sample_data_init(&data, 0, event->hw.last_period);

         data.period = event->hw.last_period;
-        sample_type = event->attr.sample_type;

         /*
-         * if PEBS-LL or PreciseStore
+         * Use latency for weight (only avail with PEBS-LL)
          */
-        if (fll || fst) {
-                /*
-                 * Use latency for weight (only avail with PEBS-LL)
-                 */
-                if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
-                        data.weight = pebs->lat;
+        if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
+                data.weight = pebs->lat;

-                /*
-                 * data.data_src encodes the data source
-                 */
-                if (sample_type & PERF_SAMPLE_DATA_SRC) {
-                        if (fll)
-                                data.data_src.val = load_latency_data(pebs->dse);
-                        else if (event->hw.flags &
-                                        (PERF_X86_EVENT_PEBS_ST_HSW|
-                                         PERF_X86_EVENT_PEBS_LD_HSW|
-                                         PERF_X86_EVENT_PEBS_NA_HSW))
-                                data.data_src.val =
-                                        precise_store_data_hsw(event, pebs->dse);
-                        else
-                                data.data_src.val = precise_store_data(pebs->dse);
-                }
-        }
+        /*
+         * data.data_src encodes the data source
+         */
+        if (dsrc) {
+                u64 val = PERF_MEM_NA;
+                if (fll)
+                        val = load_latency_data(pebs->dse);
+                else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
+                        val = precise_datala_hsw(event, pebs->dse);
+                else if (fst)
+                        val = precise_store_data(pebs->dse);
+                data.data_src.val = val;
+        }

         /*
@@ -901,16 +894,16 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
         else
                 regs.flags &= ~PERF_EFLAGS_EXACT;

-        if ((event->attr.sample_type & PERF_SAMPLE_ADDR) &&
+        if ((sample_type & PERF_SAMPLE_ADDR) &&
             x86_pmu.intel_cap.pebs_format >= 1)
                 data.addr = pebs->dla;

         if (x86_pmu.intel_cap.pebs_format >= 2) {
                 /* Only set the TSX weight when no memory weight. */
-                if ((event->attr.sample_type & PERF_SAMPLE_WEIGHT) && !fll)
+                if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
                         data.weight = intel_hsw_weight(pebs);

-                if (event->attr.sample_type & PERF_SAMPLE_TRANSACTION)
+                if (sample_type & PERF_SAMPLE_TRANSACTION)
                         data.txn = intel_hsw_transaction(pebs);
         }
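
The value returned by precise_datala_hsw() (or by load_latency_data()/precise_store_data()) lands in data.data_src and reaches userspace through the PERF_SAMPLE_DATA_SRC field of the sample. Below is a minimal userspace sketch of decoding such a value with the uapi union perf_mem_data_src; the function name and the sample value are illustrative only and not part of this patch.

#include <stdio.h>
#include <linux/perf_event.h>

/* Sketch only: decode a data_src value such as the ones produced above. */
static void print_data_src(__u64 data_src)
{
        union perf_mem_data_src dse = { .val = data_src };

        printf("op:  %s\n",
               (dse.mem_op & PERF_MEM_OP_STORE) ? "store" :
               (dse.mem_op & PERF_MEM_OP_LOAD)  ? "load"  : "n/a");

        if (dse.mem_lvl & PERF_MEM_LVL_L1)
                printf("lvl: L1 %s\n",
                       (dse.mem_lvl & PERF_MEM_LVL_HIT) ? "hit" : "miss");
        else
                printf("lvl: n/a\n");
}

int main(void)
{
        union perf_mem_data_src dse = { .val = 0 };

        /* Example value: an L1-hit store, as the HSW store path reports. */
        dse.mem_op  = PERF_MEM_OP_STORE;
        dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
        print_data_src(dse.val);
        return 0;
}

As the diff shows, the L1 hit/miss level is only filled in for the Haswell store events (PERF_X86_EVENT_PEBS_ST_HSW); everything else keeps its PERF_MEM_NA default.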