提交 b04243ef 编写于 作者: Peter Zijlstra 提交者: Ingo Molnar

perf: Complete software pmu grouping

Aside from allowing software events into a !software group,
allow adding !software events to pure software groups.

Once we've moved the software group and attached the first
!software event, the group will no longer be a pure software
group and hence no longer be eligible for movement, at which
point the straight ctx comparison is correct again.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20100917093009.410784731@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 d14b12d7
...@@ -804,12 +804,18 @@ struct perf_event { ...@@ -804,12 +804,18 @@ struct perf_event {
#endif /* CONFIG_PERF_EVENTS */ #endif /* CONFIG_PERF_EVENTS */
}; };
/*
 * Kind of context a perf_event_context represents: per-task or per-CPU.
 * cpu_context is assigned to cpuctx->ctx.type in perf_pmu_register()
 * (see the hunk below); the type is compared instead of the context
 * pointer itself when a pure-software group is being moved to a
 * hardware context (move_group path in perf_event_open).
 */
enum perf_event_context_type {
task_context,
cpu_context,
};
/** /**
* struct perf_event_context - event context structure * struct perf_event_context - event context structure
* *
* Used as a container for task events and CPU events as well: * Used as a container for task events and CPU events as well:
*/ */
struct perf_event_context { struct perf_event_context {
enum perf_event_context_type type;
struct pmu *pmu; struct pmu *pmu;
/* /*
* Protect the states of the events in the list, * Protect the states of the events in the list,
......
...@@ -5184,6 +5184,7 @@ int perf_pmu_register(struct pmu *pmu) ...@@ -5184,6 +5184,7 @@ int perf_pmu_register(struct pmu *pmu)
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
__perf_event_init_context(&cpuctx->ctx); __perf_event_init_context(&cpuctx->ctx);
cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu; cpuctx->ctx.pmu = pmu;
cpuctx->timer_interval = TICK_NSEC; cpuctx->timer_interval = TICK_NSEC;
hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
...@@ -5517,7 +5518,8 @@ SYSCALL_DEFINE5(perf_event_open, ...@@ -5517,7 +5518,8 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr __user *, attr_uptr, struct perf_event_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{ {
struct perf_event *event, *group_leader = NULL, *output_event = NULL; struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr; struct perf_event_attr attr;
struct perf_event_context *ctx; struct perf_event_context *ctx;
struct file *event_file = NULL; struct file *event_file = NULL;
...@@ -5525,6 +5527,7 @@ SYSCALL_DEFINE5(perf_event_open, ...@@ -5525,6 +5527,7 @@ SYSCALL_DEFINE5(perf_event_open,
struct task_struct *task = NULL; struct task_struct *task = NULL;
struct pmu *pmu; struct pmu *pmu;
int event_fd; int event_fd;
int move_group = 0;
int fput_needed = 0; int fput_needed = 0;
int err; int err;
...@@ -5574,8 +5577,29 @@ SYSCALL_DEFINE5(perf_event_open, ...@@ -5574,8 +5577,29 @@ SYSCALL_DEFINE5(perf_event_open,
* any hardware group. * any hardware group.
*/ */
pmu = event->pmu; pmu = event->pmu;
if ((pmu->task_ctx_nr == perf_sw_context) && group_leader)
pmu = group_leader->pmu; if (group_leader &&
(is_software_event(event) != is_software_event(group_leader))) {
if (is_software_event(event)) {
/*
* If event and group_leader are not both a software
* event, and event is, then group leader is not.
*
* Allow the addition of software events to !software
* groups, this is safe because software events never
* fail to schedule.
*/
pmu = group_leader->pmu;
} else if (is_software_event(group_leader) &&
(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
/*
* In case the group is a pure software group, and we
* try to add a hardware event, move the whole group to
* the hardware context.
*/
move_group = 1;
}
}
if (pid != -1) if (pid != -1)
task = find_lively_task_by_vpid(pid); task = find_lively_task_by_vpid(pid);
...@@ -5605,8 +5629,14 @@ SYSCALL_DEFINE5(perf_event_open, ...@@ -5605,8 +5629,14 @@ SYSCALL_DEFINE5(perf_event_open,
* Do not allow to attach to a group in a different * Do not allow to attach to a group in a different
* task or CPU context: * task or CPU context:
*/ */
if (group_leader->ctx != ctx) if (move_group) {
goto err_context; if (group_leader->ctx->type != ctx->type)
goto err_context;
} else {
if (group_leader->ctx != ctx)
goto err_context;
}
/* /*
* Only a group leader can be exclusive or pinned * Only a group leader can be exclusive or pinned
*/ */
...@@ -5626,9 +5656,34 @@ SYSCALL_DEFINE5(perf_event_open, ...@@ -5626,9 +5656,34 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_context; goto err_context;
} }
if (move_group) {
struct perf_event_context *gctx = group_leader->ctx;
mutex_lock(&gctx->mutex);
perf_event_remove_from_context(group_leader);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_event_remove_from_context(sibling);
put_ctx(gctx);
}
mutex_unlock(&gctx->mutex);
put_ctx(gctx);
}
event->filp = event_file; event->filp = event_file;
WARN_ON_ONCE(ctx->parent_ctx); WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex); mutex_lock(&ctx->mutex);
if (move_group) {
perf_install_in_context(ctx, group_leader, cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_install_in_context(ctx, sibling, cpu);
get_ctx(ctx);
}
}
perf_install_in_context(ctx, event, cpu); perf_install_in_context(ctx, event, cpu);
++ctx->generation; ++ctx->generation;
mutex_unlock(&ctx->mutex); mutex_unlock(&ctx->mutex);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册