提交 c347a2f1 编写于 作者: Peter Zijlstra 提交者: Ingo Molnar

perf/x86: Add a few more comments

Add a few comments on the ->add(), ->del() and ->*_txn()
implementation.
Requested-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-he3819318c245j7t5e1e22tr@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 fdded676
...@@ -892,7 +892,6 @@ static void x86_pmu_enable(struct pmu *pmu) ...@@ -892,7 +892,6 @@ static void x86_pmu_enable(struct pmu *pmu)
* hw_perf_group_sched_in() or x86_pmu_enable() * hw_perf_group_sched_in() or x86_pmu_enable()
* *
* step1: save events moving to new counters * step1: save events moving to new counters
* step2: reprogram moved events into new counters
*/ */
for (i = 0; i < n_running; i++) { for (i = 0; i < n_running; i++) {
event = cpuc->event_list[i]; event = cpuc->event_list[i];
...@@ -918,6 +917,9 @@ static void x86_pmu_enable(struct pmu *pmu) ...@@ -918,6 +917,9 @@ static void x86_pmu_enable(struct pmu *pmu)
x86_pmu_stop(event, PERF_EF_UPDATE); x86_pmu_stop(event, PERF_EF_UPDATE);
} }
/*
* step2: reprogram moved events into new counters
*/
for (i = 0; i < cpuc->n_events; i++) { for (i = 0; i < cpuc->n_events; i++) {
event = cpuc->event_list[i]; event = cpuc->event_list[i];
hwc = &event->hw; hwc = &event->hw;
...@@ -1043,7 +1045,7 @@ static int x86_pmu_add(struct perf_event *event, int flags) ...@@ -1043,7 +1045,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
/* /*
* If group events scheduling transaction was started, * If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed * skip the schedulability test here, it will be performed
* at commit time (->commit_txn) as a whole * at commit time (->commit_txn) as a whole.
*/ */
if (cpuc->group_flag & PERF_EVENT_TXN) if (cpuc->group_flag & PERF_EVENT_TXN)
goto done_collect; goto done_collect;
...@@ -1058,6 +1060,10 @@ static int x86_pmu_add(struct perf_event *event, int flags) ...@@ -1058,6 +1060,10 @@ static int x86_pmu_add(struct perf_event *event, int flags)
memcpy(cpuc->assign, assign, n*sizeof(int)); memcpy(cpuc->assign, assign, n*sizeof(int));
done_collect: done_collect:
/*
* Commit the collect_events() state. See x86_pmu_del() and
* x86_pmu_*_txn().
*/
cpuc->n_events = n; cpuc->n_events = n;
cpuc->n_added += n - n0; cpuc->n_added += n - n0;
cpuc->n_txn += n - n0; cpuc->n_txn += n - n0;
...@@ -1183,28 +1189,38 @@ static void x86_pmu_del(struct perf_event *event, int flags) ...@@ -1183,28 +1189,38 @@ static void x86_pmu_del(struct perf_event *event, int flags)
* If we're called during a txn, we don't need to do anything. * If we're called during a txn, we don't need to do anything.
* The events never got scheduled and ->cancel_txn will truncate * The events never got scheduled and ->cancel_txn will truncate
* the event_list. * the event_list.
*
* XXX assumes any ->del() called during a TXN will only be on
* an event added during that same TXN.
*/ */
if (cpuc->group_flag & PERF_EVENT_TXN) if (cpuc->group_flag & PERF_EVENT_TXN)
return; return;
/*
* Not a TXN, therefore cleanup properly.
*/
x86_pmu_stop(event, PERF_EF_UPDATE); x86_pmu_stop(event, PERF_EF_UPDATE);
for (i = 0; i < cpuc->n_events; i++) { for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event_list[i]) { if (event == cpuc->event_list[i])
break;
}
if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
return;
/* If we have a newly added event; make sure to decrease n_added. */
if (i >= cpuc->n_events - cpuc->n_added) if (i >= cpuc->n_events - cpuc->n_added)
--cpuc->n_added; --cpuc->n_added;
if (x86_pmu.put_event_constraints) if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, event); x86_pmu.put_event_constraints(cpuc, event);
/* Delete the array entry. */
while (++i < cpuc->n_events) while (++i < cpuc->n_events)
cpuc->event_list[i-1] = cpuc->event_list[i]; cpuc->event_list[i-1] = cpuc->event_list[i];
--cpuc->n_events; --cpuc->n_events;
break;
}
}
perf_event_update_userpage(event); perf_event_update_userpage(event);
} }
...@@ -1598,7 +1614,8 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) ...@@ -1598,7 +1614,8 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
{ {
__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN); __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
/* /*
* Truncate the collected events. * Truncate collected array by the number of events added in this
* transaction. See x86_pmu_add() and x86_pmu_*_txn().
*/ */
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
...@@ -1609,6 +1626,8 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) ...@@ -1609,6 +1626,8 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
* Commit group events scheduling transaction * Commit group events scheduling transaction
* Perform the group schedulability test as a whole * Perform the group schedulability test as a whole
* Return 0 if success * Return 0 if success
*
* Does not cancel the transaction on failure; expects the caller to do this.
*/ */
static int x86_pmu_commit_txn(struct pmu *pmu) static int x86_pmu_commit_txn(struct pmu *pmu)
{ {
......
...@@ -130,9 +130,11 @@ struct cpu_hw_events { ...@@ -130,9 +130,11 @@ struct cpu_hw_events {
unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled; int enabled;
int n_events; int n_events; /* the # of events in the below arrays */
int n_added; int n_added; /* the # last events in the below arrays;
int n_txn; they've never been enabled yet */
int n_txn; /* the # last events in the below arrays;
added in the current transaction */
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX]; u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册