Commit a66b0010 authored by Andrew Murray, committed by Ingo Molnar

perf/drivers: Strengthen exclusion checks with PERF_PMU_CAP_NO_EXCLUDE

For drivers that do not support context exclusion let's advertise the
PERF_PMU_CAP_NO_EXCLUDE capability. This ensures that perf will
prevent us from handling events where any exclusion flags are set.
Let's also remove the now unnecessary check for exclusion flags.
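
As the hunks below show, advertising the capability is a one-line addition to each driver's struct pmu initialisation. A minimal sketch of the pattern (the example_* names are placeholders, not callbacks from these drivers; assumes <linux/perf_event.h>):

	static struct pmu example_pmu = {
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= example_event_init,
		.add		= example_event_add,
		.del		= example_event_del,
		.start		= example_event_start,
		.stop		= example_event_stop,
		.read		= example_event_read,
		/* This PMU cannot filter by privilege level or guest/host */
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};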

This change means that qcom_{l2|l3}_pmu will now also indicate that
they do not support exclude_{host|guest}, and that xgene_pmu also
does not support exclude_idle and exclude_hv.

Note that for qcom_l2_pmu we now implicitly return -EINVAL instead
of -EOPNOTSUPP. This change will result in the perf userspace
utility retrying the perf_event_open system call with fallback
event attributes that do not fail.
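
For context, the rejection now happens once in the perf core rather than in each driver. The snippet below is a simplified sketch of that behaviour (not the exact code from the companion core patch in this series): once a PMU advertises the capability, any event carrying an exclude_* bit is refused before the driver's event_init runs.

	/* Sketch: core rejects exclusion requests for incapable PMUs */
	if ((pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE) &&
	    (event->attr.exclude_user || event->attr.exclude_kernel ||
	     event->attr.exclude_hv   || event->attr.exclude_idle   ||
	     event->attr.exclude_host || event->attr.exclude_guest))
		return -EINVAL;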
Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sascha Hauer <s.hauer@pengutronix.de>
Cc: Shawn Guo <shawnguo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: robin.murphy@arm.com
Cc: suzuki.poulose@arm.com
Link: https://lkml.kernel.org/r/1547128414-50693-9-git-send-email-andrew.murray@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 30656398
drivers/perf/qcom_l2_pmu.c

@@ -509,14 +509,6 @@ static int l2_cache_event_init(struct perf_event *event)
 		return -EOPNOTSUPP;
 	}
 
-	/* We cannot filter accurately so we just don't allow it. */
-	if (event->attr.exclude_user || event->attr.exclude_kernel ||
-	    event->attr.exclude_hv || event->attr.exclude_idle) {
-		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
-				    "Can't exclude execution levels\n");
-		return -EOPNOTSUPP;
-	}
-
 	if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
 	     ((event->attr.config & ~L2_EVT_MASK) != 0)) &&
 	    (event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
@@ -982,6 +974,7 @@ static int l2_cache_pmu_probe(struct platform_device *pdev)
 		.stop		= l2_cache_event_stop,
 		.read		= l2_cache_event_read,
 		.attr_groups	= l2_cache_pmu_attr_grps,
+		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
 	};
 
 	l2cache_pmu->num_counters = get_num_counters();
drivers/perf/qcom_l3_pmu.c

@@ -494,13 +494,6 @@ static int qcom_l3_cache__event_init(struct perf_event *event)
 	if (event->attr.type != event->pmu->type)
 		return -ENOENT;
 
-	/*
-	 * There are no per-counter mode filters in the PMU.
-	 */
-	if (event->attr.exclude_user || event->attr.exclude_kernel ||
-	    event->attr.exclude_hv || event->attr.exclude_idle)
-		return -EINVAL;
-
 	/*
 	 * Sampling not supported since these events are not core-attributable.
 	 */
@@ -777,6 +770,7 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
 		.read		= qcom_l3_cache__event_read,
 
 		.attr_groups	= qcom_l3_cache_pmu_attr_grps,
+		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
 	};
 
 	memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drivers/perf/xgene_pmu.c

@@ -917,11 +917,6 @@ static int xgene_perf_event_init(struct perf_event *event)
 	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
 		return -EINVAL;
 
-	/* SOC counters do not have usr/os/guest/host bits */
-	if (event->attr.exclude_user || event->attr.exclude_kernel ||
-	    event->attr.exclude_host || event->attr.exclude_guest)
-		return -EINVAL;
-
 	if (event->cpu < 0)
 		return -EINVAL;
 	/*
@@ -1136,6 +1131,7 @@ static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
 		.start		= xgene_perf_start,
 		.stop		= xgene_perf_stop,
 		.read		= xgene_perf_read,
+		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
 	};
 
 	/* Hardware counter init */