Commit 81fff278 authored by Peter Zijlstra, committed by Joseph Qi

ICX: perf/x86: Support constraint ranges

commit 63b79f6ebc464afb730bc45762c820795e276da1 upstream.

Icelake extended the general counters to 8, even when SMT is enabled.
However, only a (large) subset of the events can be used on all 8
counters.

The events that can or cannot be used on all counters are organized
in ranges.

A lot of scheduler constraints are required to handle all this.

To avoid blowing up the tables add event code ranges to the constraint
tables, and a new inline function to match them.
Originally-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> # developer hat on
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> # maintainer hat on
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Cc: jolsa@kernel.org
Link: https://lkml.kernel.org/r/20190402194509.2832-8-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Shen, Xiaochen <xiaochen.shen@intel.com>
Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Acked-by: Caspar Zhang <caspar@linux.alibaba.com>
Parent 669e9266
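The range check introduced by the diff below relies on unsigned arithmetic: after masking, an event code below c->code underflows to a very large value, so a single unsigned comparison against c->size rejects everything outside [code, code + size]. The following is a minimal user-space sketch of that trick; the struct constraint type, the harness, and the example event codes are made up for illustration and are not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct event_constraint. */
struct constraint {
	uint64_t code;          /* first event code in the range */
	uint64_t cmask;         /* config bits that participate in the match */
	unsigned int size;      /* last code minus first code (0 for a single event) */
};

/* Same comparison as the new constraint_match(): a code below c->code
 * wraps around to a huge unsigned value and fails the <= test, so one
 * compare covers the whole range. */
static int constraint_match(const struct constraint *c, uint64_t ecode)
{
	return ((ecode & c->cmask) - c->code) <= (uint64_t)c->size;
}

int main(void)
{
	/* Hypothetical range covering event codes 0xa8 through 0xb0. */
	struct constraint c = { .code = 0xa8, .cmask = 0xff, .size = 0xb0 - 0xa8 };

	printf("%d\n", constraint_match(&c, 0xa8)); /* 1: start of range */
	printf("%d\n", constraint_match(&c, 0xb0)); /* 1: end of range */
	printf("%d\n", constraint_match(&c, 0xb1)); /* 0: just above the range */
	printf("%d\n", constraint_match(&c, 0x10)); /* 0: below the range, subtraction wraps */
	return 0;
}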
@@ -2578,7 +2578,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	if (x86_pmu.event_constraints) {
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if ((event->hw.config & c->cmask) == c->code) {
+			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
 			}

@@ -858,7 +858,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 	if (x86_pmu.pebs_constraints) {
 		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
-			if ((event->hw.config & c->cmask) == c->code) {
+			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
 			}

@@ -54,7 +54,14 @@ struct event_constraint {
 	int	weight;
 	int	overlap;
 	int	flags;
+	unsigned int	size;
 };
+
+static inline bool constraint_match(struct event_constraint *c, u64 ecode)
+{
+	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
+}
+
 /*
  * struct hw_perf_event.flags flags
  */

@@ -281,18 +288,29 @@ struct cpu_hw_events {
 	void				*kfree_on_online[X86_PERF_KFREE_MAX];
 };
 
-#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
+#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
 	{ .idxmsk64 = (n) },		\
 	.code = (c),			\
+	.size = (e) - (c),		\
 	.cmask = (m),			\
 	.weight = (w),			\
 	.overlap = (o),			\
 	.flags = f,			\
 }
 
+#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
+	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
+
 #define EVENT_CONSTRAINT(c, n, m)	\
 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
 
+/*
+ * The constraint_match() function only works for 'simple' event codes
+ * and not for extended (AMD64_EVENTSEL_EVENT) events codes.
+ */
+#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
+	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
+
 #define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
 	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
 			   0, PERF_X86_EVENT_EXCL)

@@ -321,6 +339,12 @@ struct cpu_hw_events {
 #define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
 
+/*
+ * Constraint on a range of Event codes
+ */
+#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
+	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
+
 /*
  * Constraint on the Event code.
  */

@@ -374,6 +398,9 @@ struct cpu_hw_events {
 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
 
+#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
+	EVENT_CONSTRAINT_RANGE(c, e, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+
 /* Check only flags, but allow all event/umask */
 #define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
 	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

@@ -390,6 +417,11 @@ struct cpu_hw_events {
 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
 
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
+	__EVENT_CONSTRAINT_RANGE(code, end, n,				\
+			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
 	__EVENT_CONSTRAINT(code, n, \
 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
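With these macros, a later Icelake patch can express whole blocks of events as single table entries instead of one entry per event code. The fragment below only illustrates the intended usage; the event codes, the counter mask, and the table name are hypothetical and are not taken from the real Icelake constraint table.

/* Illustration only: hypothetical codes and counter mask. */
static struct event_constraint icl_range_demo_constraints[] = {
	/* Event codes 0xa8 through 0xb0 restricted to counters 0-3. */
	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
	/* A single event code still uses the non-range form. */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xff),
	EVENT_CONSTRAINT_END
};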