commit ce9f85c3
Author: Ingo Molnar <mingo@kernel.org>

Merge tag 'perf-urgent-for-mingo-4.17-20180406' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes from Arnaldo Carvalho de Melo:

- Show group details on the title line in the annotate browser
  and 'perf annotate --stdio2' output, so that the per-event
  columns can have headers (Arnaldo Carvalho de Melo)

- Fix up the vertical line separating metrics from instructions and
  clean up unused lines at the bottom, both in the annotate TUI
  browser (Arnaldo Carvalho de Melo)

- Remove duplicated 'samples' in lost samples warning in
  'perf report' (Arnaldo Carvalho de Melo)

- Synchronize i915_drm.h, silencing the perf build process,
  automagically adding support for the new DRM_I915_QUERY
  ioctl (Arnaldo Carvalho de Melo)

- Make auxtrace_queues__add_buffer() allocate struct buffer,
  from a patchkit already applied (Adrian Hunter)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -318,6 +318,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_PERF_OPEN		0x36
 #define DRM_I915_PERF_ADD_CONFIG	0x37
 #define DRM_I915_PERF_REMOVE_CONFIG	0x38
+#define DRM_I915_QUERY			0x39
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -375,6 +376,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
 #define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
+#define DRM_IOCTL_I915_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -1358,7 +1360,9 @@ struct drm_intel_overlay_attrs {
  * active on a given plane.
  */
 
-#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
+						* flags==0 to disable colorkeying.
+						*/
 #define I915_SET_COLORKEY_DESTINATION	(1<<1)
 #define I915_SET_COLORKEY_SOURCE	(1<<2)
 struct drm_intel_sprite_colorkey {
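
For userspace, the deprecation note above means colorkeying is now disabled by submitting the colorkey ioctl with flags cleared rather than by setting I915_SET_COLORKEY_NONE. A minimal sketch of that, assuming the mainline DRM_IOCTL_I915_SET_SPRITE_COLORKEY ioctl and struct layout (neither is shown in this hunk, so treat them as assumptions), with error handling left to the caller:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* include path depends on your libdrm setup */

/* Hypothetical helper: disable colorkeying on a sprite plane. */
static int sprite_colorkey_disable(int drm_fd, __u32 plane_id)
{
	struct drm_intel_sprite_colorkey ckey;

	memset(&ckey, 0, sizeof(ckey));
	ckey.plane_id = plane_id;
	ckey.flags = 0;	/* per the comment above: flags == 0 disables colorkeying */

	return ioctl(drm_fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ckey);
}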
@@ -1604,15 +1608,115 @@ struct drm_i915_perf_oa_config {
 	__u32 n_flex_regs;
 
 	/*
-	 * These fields are pointers to tuples of u32 values (register
-	 * address, value). For example the expected length of the buffer
-	 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+	 * These fields are pointers to tuples of u32 values (register address,
+	 * value). For example the expected length of the buffer pointed by
+	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
 	 */
 	__u64 mux_regs_ptr;
 	__u64 boolean_regs_ptr;
 	__u64 flex_regs_ptr;
 };
 
+struct drm_i915_query_item {
+	__u64 query_id;
+#define DRM_I915_QUERY_TOPOLOGY_INFO    1
+
+	/*
+	 * When set to zero by userspace, this is filled with the size of the
+	 * data to be written at the data_ptr pointer. The kernel sets this
+	 * value to a negative value to signal an error on a particular query
+	 * item.
+	 */
+	__s32 length;
+
+	/*
+	 * Unused for now. Must be cleared to zero.
+	 */
+	__u32 flags;
+
+	/*
+	 * Data will be written at the location pointed by data_ptr when the
+	 * value of length matches the length of the data to be written by the
+	 * kernel.
+	 */
+	__u64 data_ptr;
+};
+
+struct drm_i915_query {
+	__u32 num_items;
+
+	/*
+	 * Unused for now. Must be cleared to zero.
+	 */
+	__u32 flags;
+
+	/*
+	 * This points to an array of num_items drm_i915_query_item structures.
+	 */
+	__u64 items_ptr;
+};
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
+ *
+ * data: contains the 3 pieces of information :
+ *
+ * - the slice mask with one bit per slice telling whether a slice is
+ *   available. The availability of slice X can be queried with the following
+ *   formula :
+ *
+ *           (data[X / 8] >> (X % 8)) & 1
+ *
+ * - the subslice mask for each slice with one bit per subslice telling
+ *   whether a subslice is available. The availability of subslice Y in slice
+ *   X can be queried with the following formula :
+ *
+ *           (data[subslice_offset +
+ *                 X * subslice_stride +
+ *                 Y / 8] >> (Y % 8)) & 1
+ *
+ * - the EU mask for each subslice in each slice with one bit per EU telling
+ *   whether an EU is available. The availability of EU Z in subslice Y in
+ *   slice X can be queried with the following formula :
+ *
+ *           (data[eu_offset +
+ *                 (X * max_subslices + Y) * eu_stride +
+ *                 Z / 8] >> (Z % 8)) & 1
+ */
+struct drm_i915_query_topology_info {
+	/*
+	 * Unused for now. Must be cleared to zero.
+	 */
+	__u16 flags;
+
+	__u16 max_slices;
+	__u16 max_subslices;
+	__u16 max_eus_per_subslice;
+
+	/*
+	 * Offset in data[] at which the subslice masks are stored.
+	 */
+	__u16 subslice_offset;
+
+	/*
+	 * Stride at which each of the subslice masks for each slice are
+	 * stored.
+	 */
+	__u16 subslice_stride;
+
+	/*
+	 * Offset in data[] at which the EU masks are stored.
+	 */
+	__u16 eu_offset;
+
+	/*
+	 * Stride at which each of the EU masks for each subslice are stored.
+	 */
+	__u16 eu_stride;
+
+	__u8 data[];
+};
+
 #if defined(__cplusplus)
 }
 #endif
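
To make the new uAPI concrete, here is a hedged userspace sketch of the two-pass protocol documented in the drm_i915_query_item comments: a first DRM_I915_QUERY call with length == 0 asks the kernel for the required buffer size, a second call fills the buffer, and the slice check applies the bitmask formula given above. The device node path is an illustrative assumption and error reporting is minimal:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* include path depends on your libdrm setup */

int main(void)
{
	/* Assumed device node; often the i915 render node on desktop systems. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
		/* .length stays 0: ask the kernel how much space we need */
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (__u64)(uintptr_t)&item,
	};
	struct drm_i915_query_topology_info *info;
	int s;

	/* Pass 1: the kernel writes the needed size (or a negative errno)
	 * into item.length. */
	if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_QUERY, &query) < 0 ||
	    item.length <= 0)
		return 1;

	info = calloc(1, item.length);
	if (!info)
		return 1;

	/* Pass 2: same item, item.length kept, data_ptr now set. */
	item.data_ptr = (__u64)(uintptr_t)info;
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) < 0)
		return 1;

	/* Slice X is available iff (data[X / 8] >> (X % 8)) & 1. */
	for (s = 0; s < info->max_slices; s++)
		printf("slice %d: %savailable\n", s,
		       (info->data[s / 8] >> (s % 8)) & 1 ? "" : "un");

	free(info);
	close(fd);
	return 0;
}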
......
@@ -343,8 +343,8 @@ static int __ui_browser__refresh(struct ui_browser *browser)
 	else
 		width += 1;
 
-	SLsmg_fill_region(browser->y + row, browser->x,
-			  browser->height - row, width, ' ');
+	SLsmg_fill_region(browser->y + row + browser->extra_title_lines, browser->x,
+			  browser->rows - row, width, ' ');
 
 	return 0;
 }
......
@@ -218,7 +218,7 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
 	annotate_browser__draw_current_jump(browser);
 
 	ui_browser__set_color(browser, HE_COLORSET_NORMAL);
-	__ui_browser__vline(browser, pcnt_width, 0, browser->height - 1);
+	__ui_browser__vline(browser, pcnt_width, 0, browser->rows - 1);
 
 	return ret;
 }
......
@@ -2600,7 +2600,7 @@ int __annotation__scnprintf_samples_period(struct annotation *notes,
 					   bool show_freq)
 {
 	const char *ev_name = perf_evsel__name(evsel);
-	char ref[30] = " show reference callgraph, ";
+	char buf[1024], ref[30] = " show reference callgraph, ";
 	char sample_freq_str[64] = "";
 	unsigned long nr_samples = 0;
 	int nr_members = 1;
@@ -2609,8 +2609,11 @@ int __annotation__scnprintf_samples_period(struct annotation *notes,
 	char unit;
 	int i;
 
-	if (perf_evsel__is_group_event(evsel))
+	if (perf_evsel__is_group_event(evsel)) {
+		perf_evsel__group_desc(evsel, buf, sizeof(buf));
+		ev_name = buf;
 		nr_members = evsel->nr_members;
+	}
 
 	for (i = 0; i < nr_members; i++) {
 		struct sym_hist *ah = annotation__histogram(notes, evsel->idx + i);
......
@@ -308,7 +308,11 @@ static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
 				       struct auxtrace_buffer *buffer,
 				       struct auxtrace_buffer **buffer_ptr)
 {
-	int err;
+	int err = -ENOMEM;
+
+	buffer = memdup(buffer, sizeof(*buffer));
+	if (!buffer)
+		return -ENOMEM;
 
 	if (session->one_mmap) {
 		buffer->data = buffer->data_offset - session->one_mmap_offset +
@@ -316,24 +320,28 @@ static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
 	} else if (perf_data__is_pipe(session->data)) {
 		buffer->data = auxtrace_copy_data(buffer->size, session);
 		if (!buffer->data)
-			return -ENOMEM;
+			goto out_free;
 		buffer->data_needs_freeing = true;
 	} else if (BITS_PER_LONG == 32 &&
 		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
 		err = auxtrace_queues__split_buffer(queues, idx, buffer);
 		if (err)
-			return err;
+			goto out_free;
 	}
 
 	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
 	if (err)
-		return err;
+		goto out_free;
 
 	/* FIXME: Doesn't work for split buffer */
 	if (buffer_ptr)
 		*buffer_ptr = buffer;
 
 	return 0;
+
+out_free:
+	auxtrace_buffer__free(buffer);
+	return err;
 }
 
 static bool filter_cpu(struct perf_session *session, int cpu)
@@ -348,36 +356,22 @@ int auxtrace_queues__add_event(struct auxtrace_queues *queues,
 			       union perf_event *event, off_t data_offset,
 			       struct auxtrace_buffer **buffer_ptr)
 {
-	struct auxtrace_buffer *buffer;
-	unsigned int idx;
-	int err;
+	struct auxtrace_buffer buffer = {
+		.pid = -1,
+		.tid = event->auxtrace.tid,
+		.cpu = event->auxtrace.cpu,
+		.data_offset = data_offset,
+		.offset = event->auxtrace.offset,
+		.reference = event->auxtrace.reference,
+		.size = event->auxtrace.size,
+	};
+	unsigned int idx = event->auxtrace.idx;
 
 	if (filter_cpu(session, event->auxtrace.cpu))
 		return 0;
 
-	buffer = zalloc(sizeof(struct auxtrace_buffer));
-	if (!buffer)
-		return -ENOMEM;
-
-	buffer->pid = -1;
-	buffer->tid = event->auxtrace.tid;
-	buffer->cpu = event->auxtrace.cpu;
-	buffer->data_offset = data_offset;
-	buffer->offset = event->auxtrace.offset;
-	buffer->reference = event->auxtrace.reference;
-	buffer->size = event->auxtrace.size;
-	idx = event->auxtrace.idx;
-	err = auxtrace_queues__add_buffer(queues, session, idx, buffer,
-					  buffer_ptr);
-	if (err)
-		goto out_err;
-
-	return 0;
-
-out_err:
-	auxtrace_buffer__free(buffer);
-	return err;
+	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
+					   buffer_ptr);
 }
 
 static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
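
The refactoring above follows a common ownership idiom: the caller fills a throwaway struct on its stack and the callee takes ownership by duplicating it with memdup(), so every caller-side error path collapses into a plain return. A standalone sketch of that idiom, not perf code; the struct and function names are invented for illustration, and memdup() is re-implemented here with the same semantics as the tools/lib helper:

#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

struct item { int id; size_t size; };

/* Callee: duplicates the caller's stack object and owns the copy. */
static int queue_item(const struct item *tmpl, struct item **out)
{
	struct item *it = memdup(tmpl, sizeof(*tmpl));

	if (!it)
		return -1;
	*out = it;	/* freed later by whoever consumes the queue */
	return 0;
}

/* Caller: designated initializer on the stack, no unwinding needed. */
static int add_item(int id, size_t size, struct item **out)
{
	struct item tmpl = { .id = id, .size = size };

	return queue_item(&tmpl, out);
}

int main(void)
{
	struct item *it = NULL;

	if (add_item(1, 4096, &it))
		return 1;
	free(it);
	return 0;
}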
......
@@ -1591,7 +1591,7 @@ static void perf_session__warn_about_errors(const struct perf_session *session)
 		drop_rate = (double)stats->total_lost_samples /
 			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
 		if (drop_rate > 0.05) {
-			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
+			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
 				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
 				    drop_rate * 100.0);
 		}
......