提交 74328ee5 编写于 作者: J John Harrison 提交者: Daniel Vetter

drm/i915: Convert trace functions from seqno to request

All the code above is now using requests not seqnos so it is possible to convert
the trace functions across. Note that rather than get into problematic reference
counting issues, the trace code only saves the seqno and ring values from the
request structure not the structure pointer itself.

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
上级 f06cc1b9
...@@ -1241,8 +1241,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, ...@@ -1241,8 +1241,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
return -ENODEV; return -ENODEV;
/* Record current time in case interrupted by signal, or wedged */ /* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(i915_gem_request_get_ring(req), trace_i915_gem_request_wait_begin(req);
i915_gem_request_get_seqno(req));
before = ktime_get_raw_ns(); before = ktime_get_raw_ns();
for (;;) { for (;;) {
struct timer_list timer; struct timer_list timer;
...@@ -1294,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, ...@@ -1294,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
} }
} }
now = ktime_get_raw_ns(); now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(i915_gem_request_get_ring(req), trace_i915_gem_request_wait_end(req);
i915_gem_request_get_seqno(req));
if (!irq_test_in_progress) if (!irq_test_in_progress)
ring->irq_put(ring); ring->irq_put(ring);
...@@ -2500,7 +2498,7 @@ int __i915_add_request(struct intel_engine_cs *ring, ...@@ -2500,7 +2498,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
spin_unlock(&file_priv->mm.lock); spin_unlock(&file_priv->mm.lock);
} }
trace_i915_gem_request_add(ring, request->seqno); trace_i915_gem_request_add(request);
ring->outstanding_lazy_request = NULL; ring->outstanding_lazy_request = NULL;
i915_queue_hangcheck(ring->dev); i915_queue_hangcheck(ring->dev);
...@@ -2776,7 +2774,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) ...@@ -2776,7 +2774,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
if (!i915_seqno_passed(seqno, request->seqno)) if (!i915_seqno_passed(seqno, request->seqno))
break; break;
trace_i915_gem_request_retire(ring, request->seqno); trace_i915_gem_request_retire(request);
/* This is one of the few common intersection points /* This is one of the few common intersection points
* between legacy ringbuffer submission and execlists: * between legacy ringbuffer submission and execlists:
...@@ -3006,7 +3004,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, ...@@ -3006,7 +3004,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
trace_i915_gem_ring_sync_to(from, to, seqno); trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
ret = to->semaphore.sync_to(to, from, seqno); ret = to->semaphore.sync_to(to, from, seqno);
if (!ret) if (!ret)
/* We use last_read_req because sync_to() /* We use last_read_req because sync_to()
......
...@@ -1211,9 +1211,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, ...@@ -1211,9 +1211,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
return ret; return ret;
} }
trace_i915_gem_ring_dispatch(ring, trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
i915_gem_request_get_seqno(intel_ring_get_request(ring)),
flags);
i915_gem_execbuffer_move_to_active(vmas, ring); i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
......
...@@ -328,8 +328,8 @@ TRACE_EVENT(i915_gem_evict_vm, ...@@ -328,8 +328,8 @@ TRACE_EVENT(i915_gem_evict_vm,
TRACE_EVENT(i915_gem_ring_sync_to, TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct intel_engine_cs *from, TP_PROTO(struct intel_engine_cs *from,
struct intel_engine_cs *to, struct intel_engine_cs *to,
u32 seqno), struct drm_i915_gem_request *req),
TP_ARGS(from, to, seqno), TP_ARGS(from, to, req),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
...@@ -342,7 +342,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, ...@@ -342,7 +342,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->dev->primary->index; __entry->dev = from->dev->primary->index;
__entry->sync_from = from->id; __entry->sync_from = from->id;
__entry->sync_to = to->id; __entry->sync_to = to->id;
__entry->seqno = seqno; __entry->seqno = i915_gem_request_get_seqno(req);
), ),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u", TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
...@@ -352,8 +352,8 @@ TRACE_EVENT(i915_gem_ring_sync_to, ...@@ -352,8 +352,8 @@ TRACE_EVENT(i915_gem_ring_sync_to,
); );
TRACE_EVENT(i915_gem_ring_dispatch, TRACE_EVENT(i915_gem_ring_dispatch,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags), TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
TP_ARGS(ring, seqno, flags), TP_ARGS(req, flags),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
...@@ -363,11 +363,13 @@ TRACE_EVENT(i915_gem_ring_dispatch, ...@@ -363,11 +363,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
), ),
TP_fast_assign( TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index; __entry->dev = ring->dev->primary->index;
__entry->ring = ring->id; __entry->ring = ring->id;
__entry->seqno = seqno; __entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags; __entry->flags = flags;
i915_trace_irq_get(ring, seqno); i915_trace_irq_get(ring, __entry->seqno);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
...@@ -398,8 +400,8 @@ TRACE_EVENT(i915_gem_ring_flush, ...@@ -398,8 +400,8 @@ TRACE_EVENT(i915_gem_ring_flush,
); );
DECLARE_EVENT_CLASS(i915_gem_request, DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno), TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(ring, seqno), TP_ARGS(req),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
...@@ -408,9 +410,11 @@ DECLARE_EVENT_CLASS(i915_gem_request, ...@@ -408,9 +410,11 @@ DECLARE_EVENT_CLASS(i915_gem_request,
), ),
TP_fast_assign( TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index; __entry->dev = ring->dev->primary->index;
__entry->ring = ring->id; __entry->ring = ring->id;
__entry->seqno = seqno; __entry->seqno = i915_gem_request_get_seqno(req);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u", TP_printk("dev=%u, ring=%u, seqno=%u",
...@@ -418,8 +422,8 @@ DECLARE_EVENT_CLASS(i915_gem_request, ...@@ -418,8 +422,8 @@ DECLARE_EVENT_CLASS(i915_gem_request,
); );
DEFINE_EVENT(i915_gem_request, i915_gem_request_add, DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno), TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(ring, seqno) TP_ARGS(req)
); );
TRACE_EVENT(i915_gem_request_complete, TRACE_EVENT(i915_gem_request_complete,
...@@ -443,13 +447,13 @@ TRACE_EVENT(i915_gem_request_complete, ...@@ -443,13 +447,13 @@ TRACE_EVENT(i915_gem_request_complete,
); );
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno), TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(ring, seqno) TP_ARGS(req)
); );
TRACE_EVENT(i915_gem_request_wait_begin, TRACE_EVENT(i915_gem_request_wait_begin,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno), TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(ring, seqno), TP_ARGS(req),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
...@@ -465,10 +469,13 @@ TRACE_EVENT(i915_gem_request_wait_begin, ...@@ -465,10 +469,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable. * less desirable.
*/ */
TP_fast_assign( TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index; __entry->dev = ring->dev->primary->index;
__entry->ring = ring->id; __entry->ring = ring->id;
__entry->seqno = seqno; __entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking = mutex_is_locked(&ring->dev->struct_mutex); __entry->blocking =
mutex_is_locked(&ring->dev->struct_mutex);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
...@@ -477,8 +484,8 @@ TRACE_EVENT(i915_gem_request_wait_begin, ...@@ -477,8 +484,8 @@ TRACE_EVENT(i915_gem_request_wait_begin,
); );
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno), TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(ring, seqno) TP_ARGS(req)
); );
DECLARE_EVENT_CLASS(i915_ring, DECLARE_EVENT_CLASS(i915_ring,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册