Commit a4b3a571 authored by Daniel Vetter

drm/i915: Convert i915_wait_seqno to i915_wait_request

Updated i915_wait_seqno() to take a request structure instead of a seqno value
and renamed it accordingly. Internally, it just pulls the seqno out of the
request and calls through to __i915_wait_seqno() as before. However, all the
code further up the stack is now simplified, as it can pass the request object
straight through without having to peek inside.
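
Concretely, a call site loses the seqno plumbing; for example (lifted from the
i915_gem_object_wait_fence hunk below):

	/* Before: dig the seqno out of the request, then wait on it. */
	ret = i915_wait_seqno(obj->ring,
			      i915_gem_request_get_seqno(obj->last_fenced_req));

	/* After: hand the request object straight to the new wait. */
	ret = i915_wait_request(obj->last_fenced_req);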

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
[danvet: Squash in hunk from an earlier patch which was rebased
wrongly.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent: 9bfc01a2
drivers/gpu/drm/i915/i915_drv.h

@@ -2629,8 +2629,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 			bool interruptible,
 			s64 *timeout,
 			struct drm_i915_file_private *file_priv);
-int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
-				 uint32_t seqno);
+int __must_check i915_wait_request(struct drm_i915_gem_request *req);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -3117,20 +3116,4 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 	}
 }
 
-/* XXX: Temporary solution to be removed later in patch series. */
-static inline int __must_check i915_gem_check_ols(
-		struct intel_engine_cs *ring, u32 seqno)
-{
-	int ret;
-
-	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-
-	ret = 0;
-	if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
-		ret = i915_add_request(ring, NULL);
-
-	return ret;
-}
-/* XXX: Temporary solution to be removed later in patch series. */
-
 #endif
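
For orientation: the removed i915_gem_check_ols() has a request-based
counterpart, i915_gem_check_olr(), which the rewritten wait path below calls
instead. A sketch of what it does, assuming the request-converted helper from
earlier in this series (flush the ring's outstanding lazy request if req is it):

	int
	i915_gem_check_olr(struct drm_i915_gem_request *req)
	{
		int ret;

		WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));

		ret = 0;
		if (req == req->ring->outstanding_lazy_request)
			ret = i915_add_request(req->ring, NULL);

		return ret;
	}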
drivers/gpu/drm/i915/i915_gem.c

@@ -1308,32 +1308,40 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 }
 
 /**
- * Waits for a sequence number to be signaled, and cleans up the
+ * Waits for a request to be signaled, and cleans up the
  * request and object lists appropriately for that event.
  */
 int
-i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
+i915_wait_request(struct drm_i915_gem_request *req)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	bool interruptible = dev_priv->mm.interruptible;
+	struct drm_device *dev;
+	struct drm_i915_private *dev_priv;
+	bool interruptible;
 	unsigned reset_counter;
 	int ret;
 
+	BUG_ON(req == NULL);
+
+	dev = req->ring->dev;
+	dev_priv = dev->dev_private;
+	interruptible = dev_priv->mm.interruptible;
+
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-	BUG_ON(seqno == 0);
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_check_ols(ring, seqno);
+	ret = i915_gem_check_olr(req);
 	if (ret)
 		return ret;
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-	return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
-				 NULL, NULL);
+	i915_gem_request_reference(req);
+	ret = __i915_wait_seqno(req->ring, i915_gem_request_get_seqno(req),
+				reset_counter, interruptible, NULL, NULL);
+	i915_gem_request_unreference(req);
+	return ret;
 }
 
 static int
@@ -1363,18 +1371,13 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly)
 {
 	struct drm_i915_gem_request *req;
-	struct intel_engine_cs *ring = obj->ring;
-	u32 seqno;
 	int ret;
 
 	req = readonly ? obj->last_write_req : obj->last_read_req;
 	if (!req)
 		return 0;
 
-	seqno = i915_gem_request_get_seqno(req);
-	WARN_ON(seqno == 0);
-
-	ret = i915_wait_seqno(ring, seqno);
+	ret = i915_wait_request(req);
 	if (ret)
 		return ret;
@@ -3332,8 +3335,7 @@ static int
 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->last_fenced_req) {
-		int ret = i915_wait_seqno(obj->ring,
-					  i915_gem_request_get_seqno(obj->last_fenced_req));
+		int ret = i915_wait_request(obj->last_fenced_req);
 		if (ret)
 			return ret;
...
drivers/gpu/drm/i915/intel_lrc.c

@@ -922,7 +922,6 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	struct drm_i915_gem_request *request;
-	u32 seqno = 0;
 	int ret;
 
 	if (ringbuf->last_retired_head != -1) {
@@ -947,15 +946,14 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 		/* Would completion of this request free enough space? */
 		if (__intel_ring_space(request->tail, ringbuf->tail,
 				       ringbuf->size) >= bytes) {
-			seqno = request->seqno;
 			break;
 		}
 	}
 
-	if (seqno == 0)
+	if (&request->list == &ring->request_list)
 		return -ENOSPC;
 
-	ret = i915_wait_seqno(ring, seqno);
+	ret = i915_wait_request(request);
 	if (ret)
 		return ret;
...
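
Note the new no-match test above: list_for_each_entry() leaves its cursor
pointing at the list head when the loop completes without a break, so comparing
&request->list against &ring->request_list detects "no suitable request found"
without the old seqno sentinel. A minimal standalone illustration (generic
kernel list usage, not code from this patch):

	struct item {
		struct list_head link;
		int value;
	};

	/* Return the first item with a matching value, or NULL. */
	static struct item *find_item(struct list_head *head, int wanted)
	{
		struct item *it;

		list_for_each_entry(it, head, link)
			if (it->value == wanted)
				break;

		/* No break fired: 'it' now aliases the head, not a real item. */
		if (&it->link == head)
			return NULL;

		return it;
	}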
drivers/gpu/drm/i915/intel_overlay.c

@@ -224,8 +224,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 		return ret;
 
 	overlay->flip_tail = tail;
-	ret = i915_wait_seqno(ring,
-			      i915_gem_request_get_seqno(overlay->last_flip_req));
+	ret = i915_wait_request(overlay->last_flip_req);
 	if (ret)
 		return ret;
 	i915_gem_retire_requests(dev);
@@ -367,19 +366,15 @@ static int intel_overlay_off(struct intel_overlay *overlay)
  * We have to be careful not to repeat work forever an make forward progess. */
 static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
-	struct drm_device *dev = overlay->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	if (overlay->last_flip_req == NULL)
 		return 0;
 
-	ret = i915_wait_seqno(ring,
-			      i915_gem_request_get_seqno(overlay->last_flip_req));
+	ret = i915_wait_request(overlay->last_flip_req);
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(overlay->dev);
 
 	if (overlay->flip_tail)
 		overlay->flip_tail(overlay);
...
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -1899,7 +1899,6 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
 	struct drm_i915_gem_request *request;
-	u32 seqno = 0;
 	int ret;
 
 	if (ringbuf->last_retired_head != -1) {
@@ -1914,15 +1913,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	list_for_each_entry(request, &ring->request_list, list) {
 		if (__intel_ring_space(request->tail, ringbuf->tail,
 				       ringbuf->size) >= n) {
-			seqno = request->seqno;
 			break;
 		}
 	}
 
-	if (seqno == 0)
+	if (&request->list == &ring->request_list)
 		return -ENOSPC;
 
-	ret = i915_wait_seqno(ring, seqno);
+	ret = i915_wait_request(request);
 	if (ret)
 		return ret;
@@ -2011,7 +2009,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 
 int intel_ring_idle(struct intel_engine_cs *ring)
 {
-	u32 seqno;
+	struct drm_i915_gem_request *req;
 	int ret;
 
 	/* We need to add any requests required to flush the objects and ring */
@@ -2025,11 +2023,11 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 	if (list_empty(&ring->request_list))
 		return 0;
 
-	seqno = list_entry(ring->request_list.prev,
-			   struct drm_i915_gem_request,
-			   list)->seqno;
-
-	return i915_wait_seqno(ring, seqno);
+	req = list_entry(ring->request_list.prev,
+			 struct drm_i915_gem_request,
+			 list);
+
+	return i915_wait_request(req);
 }
 
 static int
...
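
One more piece of context: i915_wait_request() now pins the request across the
wait so it cannot be retired and freed while the caller sleeps. The reference
helpers are thin kref wrappers; roughly (per the i915 code of this era, shown
for orientation rather than as part of this patch):

	static inline void
	i915_gem_request_reference(struct drm_i915_gem_request *req)
	{
		kref_get(&req->ref);
	}

	static inline void
	i915_gem_request_unreference(struct drm_i915_gem_request *req)
	{
		WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
		kref_put(&req->ref, i915_gem_request_free);
	}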