Commit 8d2cacbb authored by Peter Zijlstra, committed by Ingo Molnar

perf: Cleanup {start,commit,cancel}_txn details

Clarify some of the transactional group scheduling API details
and change it so that a successful ->commit_txn also closes
the transaction.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1274803086.5882.1752.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 3af9e859
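
For orientation, here is a minimal caller-side sketch of the convention after this patch. Helper names such as sched_in_group() and add_group_events() are hypothetical; the real caller is group_sched_in() in kernel/perf_event.c, shown in the last hunk below. The point is that a successful ->commit_txn() now closes the transaction itself, so ->cancel_txn() is only needed on failure.

/*
 * Caller-side sketch (hypothetical helper names; see group_sched_in()
 * in kernel/perf_event.c for the real code).
 */
static int sched_in_group(const struct pmu *pmu, struct perf_event *group_leader)
{
	pmu->start_txn(pmu);	/* ->enable() may now skip its schedulability test */

	if (add_group_events(group_leader))	/* hypothetical: ->enable() each member */
		goto error;

	/*
	 * One schedulability test for the whole group. 0 means success,
	 * and on success the PMU clears PERF_EVENT_TXN itself, so the
	 * transaction is already closed here.
	 */
	if (!pmu->commit_txn(pmu))
		return 0;

error:
	/* Failure: undo the ->enable()s and explicitly close the transaction. */
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}
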
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -754,7 +754,7 @@ static int power_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be peformed
 	 * at commit time(->commit_txn) as a whole
 	 */
-	if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuhw->group_flag & PERF_EVENT_TXN)
 		goto nocheck;
 
 	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@ -858,7 +858,7 @@ void power_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -871,7 +871,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -897,6 +897,7 @@ int power_pmu_commit_txn(const struct pmu *pmu)
 	for (i = cpuhw->n_txn_start; i < n; ++i)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 	return 0;
 }
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1005,7 +1005,7 @@ static int sparc_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be peformed
 	 * at commit time(->commit_txn) as a whole
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto nocheck;
 
 	if (check_excludes(cpuc->event, n0, 1))
@@ -1102,7 +1102,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
 /*
@@ -1114,7 +1114,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -1137,6 +1137,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
 	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
 		return -EAGAIN;
 
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 	return 0;
 }
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -969,7 +969,7 @@ static int x86_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be peformed
 	 * at commit time(->commit_txn) as a whole
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto out;
 
 	ret = x86_pmu.schedule_events(cpuc, n, assign);
@@ -1096,7 +1096,7 @@ static void x86_pmu_disable(struct perf_event *event)
 	 * The events never got scheduled and ->cancel_txn will truncate
 	 * the event_list.
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		return;
 
 	x86_pmu_stop(event);
@@ -1388,7 +1388,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuc->group_flag |= PERF_EVENT_TXN;
 	cpuc->n_txn = 0;
 }
@@ -1401,7 +1401,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 	/*
 	 * Truncate the collected events.
 	 */
@@ -1435,11 +1435,7 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
 	 */
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
-	/*
-	 * Clear out the txn count so that ->cancel_txn() which gets
-	 * run after ->commit_txn() doesn't undo things.
-	 */
-	cpuc->n_txn = 0;
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 
 	return 0;
 }
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -549,7 +549,10 @@ struct hw_perf_event {
 struct perf_event;
 
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
 
 /**
  * struct pmu - generic performance monitoring unit
@@ -563,14 +566,28 @@ struct pmu {
 	void (*unthrottle)		(struct perf_event *event);
 
 	/*
-	 * group events scheduling is treated as a transaction,
-	 * add group events as a whole and perform one schedulability test.
-	 * If test fails, roll back the whole group
+	 * Group events scheduling is treated as a transaction, add group
+	 * events as a whole and perform one schedulability test. If the test
+	 * fails, roll back the whole group
 	 */
+
+	/*
+	 * Start the transaction, after this ->enable() doesn't need
+	 * to do schedulability tests.
+	 */
 	void (*start_txn)	(const struct pmu *pmu);
-	void (*cancel_txn)	(const struct pmu *pmu);
+	/*
+	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * then ->commit_txn() is required to perform one. On success
+	 * the transaction is closed. On error the transaction is kept
+	 * open until ->cancel_txn() is called.
+	 */
 	int  (*commit_txn)	(const struct pmu *pmu);
+	/*
+	 * Will cancel the transaction, assumes ->disable() is called for
+	 * each successfull ->enable() during the transaction.
+	 */
+	void (*cancel_txn)	(const struct pmu *pmu);
 };
 
 /**
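
The per-arch hunks above all follow the same pattern, which a driver might implement roughly as follows. This is an illustrative sketch only: names such as my_pmu_*, my_cpu_hw, this_cpu_hw() and my_schedule_events() are hypothetical; the flag handling mirrors the powerpc/sparc/x86 changes.

/* Illustrative driver-side pattern; all names here are hypothetical. */
static void my_pmu_start_txn(const struct pmu *pmu)
{
	struct my_cpu_hw *cpuc = this_cpu_hw();	/* assumed per-cpu PMU state */

	cpuc->group_flag |= PERF_EVENT_TXN;	/* ->enable() skips its own test */
}

static int my_pmu_commit_txn(const struct pmu *pmu)
{
	struct my_cpu_hw *cpuc = this_cpu_hw();

	if (my_schedule_events(cpuc))		/* one test for the whole group */
		return -EAGAIN;			/* keep TXN set; caller will cancel */

	cpuc->group_flag &= ~PERF_EVENT_TXN;	/* success closes the transaction */
	return 0;
}

static void my_pmu_cancel_txn(const struct pmu *pmu)
{
	struct my_cpu_hw *cpuc = this_cpu_hw();

	cpuc->group_flag &= ~PERF_EVENT_TXN;	/* drop the open transaction */
}
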
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -675,7 +675,6 @@ group_sched_in(struct perf_event *group_event,
 	struct perf_event *event, *partial_group = NULL;
 	const struct pmu *pmu = group_event->pmu;
 	bool txn = false;
-	int ret;
 
 	if (group_event->state == PERF_EVENT_STATE_OFF)
 		return 0;
@@ -703,15 +702,9 @@ group_sched_in(struct perf_event *group_event,
 		}
 	}
 
-	if (!txn)
+	if (!txn || !pmu->commit_txn(pmu))
 		return 0;
 
-	ret = pmu->commit_txn(pmu);
-	if (!ret) {
-		pmu->cancel_txn(pmu);
-		return 0;
-	}
-
 group_error:
 	/*
 	 * Groups can be scheduled in as one unit only, so undo any