Commit 63d8e38f, written by Adrian Hunter, committed by Arnaldo Carvalho de Melo

perf intel-pt: Fix sync_switch

sync_switch is a facility to synchronize decoding more closely with the
point in the kernel when the context actually switched.

The flag indicating that sync_switch is enabled was global to the decoding, whereas
it is really specific to the CPU.

The trace data for different CPUs is put on different queues, so add
sync_switch to the intel_pt_queue structure and use that in preference
to the global setting in the intel_pt structure.

That fixes problems decoding one CPU's trace because sync_switch was
disabled on a different CPU's queue.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1520431349-30689-3-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent: 117db4b2
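To make the commit message above concrete, here is a minimal sketch in plain C (not the perf code; struct decode_queue and the variable names are hypothetical stand-ins for intel_pt and intel_pt_queue). It shows why a flag that is global to the decoding misbehaves when each CPU's trace sits on its own queue: clearing a global flag after an error on one CPU also disables sync_switch for every other CPU, whereas a per-queue flag only affects the queue that hit the error.

/* Minimal sketch, assuming hypothetical names; not the perf code. */
#include <stdbool.h>
#include <stdio.h>

struct decode_queue {
	int cpu;
	bool sync_switch;	/* per-queue flag, as this patch introduces */
};

int main(void)
{
	bool global_sync_switch = true;		/* the old, global flag */
	struct decode_queue queues[2] = {
		{ .cpu = 0, .sync_switch = true },
		{ .cpu = 1, .sync_switch = true },
	};

	/* Suppose decoding hits an error on CPU 1 and sync_switch must be turned off there. */
	global_sync_switch = false;	/* old behaviour: every CPU loses sync_switch */
	queues[1].sync_switch = false;	/* new behaviour: only CPU 1's queue is affected */

	printf("global=%d cpu0=%d cpu1=%d\n", global_sync_switch,
	       queues[0].sync_switch, queues[1].sync_switch);
	return 0;
}

In the diff below, intel_pt_enable_sync_switch() performs the corresponding propagation in the real code: it sets the global flag and copies it into every queue that already exists, while the error path in intel_pt_run_decoder() now clears only ptq->sync_switch for the queue that failed.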
@@ -143,6 +143,7 @@ struct intel_pt_queue {
 	bool stop;
 	bool step_through_buffers;
 	bool use_buffer_pid_tid;
+	bool sync_switch;
 	pid_t pid, tid;
 	int cpu;
 	int switch_state;
@@ -963,10 +964,12 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
 			if (pt->timeless_decoding || !pt->have_sched_switch)
 				ptq->use_buffer_pid_tid = true;
 		}
+
+		ptq->sync_switch = pt->sync_switch;
 	}
 
 	if (!ptq->on_heap &&
-	    (!pt->sync_switch ||
+	    (!ptq->sync_switch ||
 	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
 		const struct intel_pt_state *state;
 		int ret;
@@ -1549,7 +1552,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
 	if (pt->synth_opts.last_branch)
 		intel_pt_update_last_branch_rb(ptq);
 
-	if (!pt->sync_switch)
+	if (!ptq->sync_switch)
 		return 0;
 
 	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
@@ -1630,6 +1633,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
 	return switch_ip;
 }
 
+static void intel_pt_enable_sync_switch(struct intel_pt *pt)
+{
+	unsigned int i;
+
+	pt->sync_switch = true;
+
+	for (i = 0; i < pt->queues.nr_queues; i++) {
+		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+		struct intel_pt_queue *ptq = queue->priv;
+
+		if (ptq)
+			ptq->sync_switch = true;
+	}
+}
+
 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 {
 	const struct intel_pt_state *state = ptq->state;
@@ -1646,7 +1664,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 			if (pt->switch_ip) {
 				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
 					     pt->switch_ip, pt->ptss_ip);
-				pt->sync_switch = true;
+				intel_pt_enable_sync_switch(pt);
 			}
 		}
 	}
@@ -1662,9 +1680,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 		if (state->err) {
 			if (state->err == INTEL_PT_ERR_NODATA)
 				return 1;
-			if (pt->sync_switch &&
+			if (ptq->sync_switch &&
 			    state->from_ip >= pt->kernel_start) {
-				pt->sync_switch = false;
+				ptq->sync_switch = false;
 				intel_pt_next_tid(pt, ptq);
 			}
 			if (pt->synth_opts.errors) {
@@ -1690,7 +1708,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 					     state->timestamp, state->est_timestamp);
 				ptq->timestamp = state->est_timestamp;
 			/* Use estimated TSC in unknown switch state */
-			} else if (pt->sync_switch &&
+			} else if (ptq->sync_switch &&
 				   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
 				   intel_pt_is_switch_ip(ptq, state->to_ip) &&
 				   ptq->next_tid == -1) {
@@ -1837,7 +1855,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
 		return 1;
 
 	ptq = intel_pt_cpu_to_ptq(pt, cpu);
-	if (!ptq)
+	if (!ptq || !ptq->sync_switch)
 		return 1;
 
 	switch (ptq->switch_state) {