commit 2023b359 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: inheritable sample counters

Redirect the output to the parent counter and put in some sanity checks.

[ Impact: new perfcounter feature - inherited sampling counters ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090505155437.331556171@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 22c1558e
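At its heart the change is a two-line redirect in perf_output_begin(), shown in context in the third hunk below: an inherited child counter has no mmap ring buffer of its own, so its samples are steered into the parent counter's buffer, and a profiler can collect samples for the whole task tree from one place. Distilled from that hunk (comments added):

	/*
	 * An inherited child counter owns no ring buffer; steer its
	 * output into the parent counter's mmap data.
	 */
	if (counter->parent)
		counter = counter->parent;

The sanity checks follow from this: refreshing via event_limit and PERF_RECORD_GROUP output both assume a counter with its own state, so PERF_COUNTER_IOC_REFRESH now returns -EINVAL for inherited counters, and inherit combined with PERF_RECORD_GROUP is rejected at creation time.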
@@ -738,10 +738,18 @@ static void perf_counter_enable(struct perf_counter *counter)
 	spin_unlock_irq(&ctx->lock);
 }
 
-static void perf_counter_refresh(struct perf_counter *counter, int refresh)
+static int perf_counter_refresh(struct perf_counter *counter, int refresh)
 {
+	/*
+	 * not supported on inherited counters
+	 */
+	if (counter->hw_event.inherit)
+		return -EINVAL;
+
 	atomic_add(refresh, &counter->event_limit);
 	perf_counter_enable(counter);
+
+	return 0;
 }
 
 /*
@@ -1307,7 +1315,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		perf_counter_disable_family(counter);
 		break;
 	case PERF_COUNTER_IOC_REFRESH:
-		perf_counter_refresh(counter, arg);
+		err = perf_counter_refresh(counter, arg);
 		break;
 	case PERF_COUNTER_IOC_RESET:
 		perf_counter_reset(counter);
@@ -1814,6 +1822,12 @@ static int perf_output_begin(struct perf_output_handle *handle,
 	struct perf_mmap_data *data;
 	unsigned int offset, head;
 
+	/*
+	 * For inherited counters we send all the output towards the parent.
+	 */
+	if (counter->parent)
+		counter = counter->parent;
+
 	rcu_read_lock();
 	data = rcu_dereference(counter->data);
 	if (!data)
@@ -1995,6 +2009,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (record_type & PERF_RECORD_ADDR)
 		perf_output_put(&handle, addr);
 
+	/*
+	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
+	 */
 	if (record_type & PERF_RECORD_GROUP) {
 		struct perf_counter *leader, *sub;
 		u64 nr = counter->nr_siblings;
@@ -2281,6 +2298,11 @@ int perf_counter_overflow(struct perf_counter *counter,
 	int events = atomic_read(&counter->event_limit);
 	int ret = 0;
 
+	/*
+	 * XXX event_limit might not quite work as expected on inherited
+	 * counters
+	 */
+
 	counter->pending_kill = POLL_IN;
 	if (events && atomic_dec_and_test(&counter->event_limit)) {
 		ret = 1;
@@ -2801,6 +2823,12 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 	pmu = NULL;
 
+	/*
+	 * we currently do not support PERF_RECORD_GROUP on inherited counters
+	 */
+	if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
+		goto done;
+
 	if (perf_event_raw(hw_event)) {
 		pmu = hw_perf_counter_init(counter);
 		goto done;
...
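For illustration only, a minimal userspace sketch of how the new checks surface through the perf_counter syscall ABI of this era. The syscall wrapper, headers, and the event constant are assumptions (hypothetical scaffolding, not from the patch); the field names, ioctl, and record-type flag are taken from the diff above.

/* sketch: exercising the new inherited-counter sanity checks */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>		/* assumed header for this ABI */

static int sys_perf_counter_open(struct perf_counter_hw_event *hw_event,
				 pid_t pid, int cpu, int group_fd,
				 unsigned long flags)
{
	/* no libc wrapper existed; syscall number is arch-specific */
	return syscall(__NR_perf_counter_open, hw_event, pid, cpu,
		       group_fd, flags);
}

int main(void)
{
	struct perf_counter_hw_event hw_event = {
		.config		= PERF_COUNT_CPU_CYCLES, /* assumed constant */
		.irq_period	= 100000,	/* sample every 100k events */
		.record_type	= PERF_RECORD_IP,
		.inherit	= 1,		/* child tasks inherit the counter */
	};
	int fd = sys_perf_counter_open(&hw_event, getpid(), -1, -1, 0);

	if (fd < 0) {
		perror("perf_counter_open");
		return 1;
	}

	/* refresh is not supported on inherited counters: expect EINVAL */
	if (ioctl(fd, PERF_COUNTER_IOC_REFRESH, 1) < 0)
		perror("PERF_COUNTER_IOC_REFRESH");

	/* inherit + PERF_RECORD_GROUP is now rejected at creation time */
	hw_event.record_type |= PERF_RECORD_GROUP;
	if (sys_perf_counter_open(&hw_event, getpid(), -1, -1, 0) < 0)
		perror("perf_counter_open (inherit + PERF_RECORD_GROUP)");

	return 0;
}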