Commit a7e02199 authored by Alex Dai, committed by Daniel Vetter

drm/i915/guc: Move GuC wq_check_space to alloc_request_extras

Split GuC work queue space checking from submission and move it to
ring_alloc_request_extras. The reason is that a failure in the later
i915_add_request() call won't be handled. In case a timeout happens,
the driver can return early in order to handle the error.

v1: Move wq_reserve_space to ring_reserve_space
v2: Move wq_reserve_space to alloc_request_extras (Chris Wilson)
v3: The work queue head pointer is now cached by the driver, so we can
    return quickly if space is available.
    s/reserve/check/g (Dave Gordon)
v4: Update the cached wq head after ringing the doorbell; check wq space
    before ringing the doorbell in case an unexpected error happens; call
    the wq space check only when GuC submission is enabled. (Dave Gordon)
Signed-off-by: Alex Dai <yu.dai@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1450295155-10050-1-git-send-email-yu.dai@intel.com
Reviewed-by: Dave Gordon <david.s.gordon@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent c1a415e2
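For context before the diff: the check this patch introduces is the classic
circular-buffer space test, with a fast path against the head value cached at
the last doorbell and a slow path that polls the head the GuC publishes in the
shared process descriptor. Below is a minimal, self-contained sketch of that
pattern; the wq_client type, the shared_head pointer, and the wq_check_space
name are simplified stand-ins for illustration, not the driver's actual code:

#include <errno.h>
#include <stdint.h>

/* Free space in a power-of-2 sized ring, as the kernel's CIRC_SPACE()
 * computes it: e.g. tail=0, head=0, size=8 -> 7 slots usable. */
#define CIRC_SPACE(tail, head, size) (((head) - (tail) - 1) & ((size) - 1))

struct wq_client {
	uint32_t wq_tail;		/* driver-owned write pointer */
	uint32_t wq_head;		/* cached copy of the consumer's head */
	uint32_t wq_size;		/* ring size, a power of two */
	volatile uint32_t *shared_head;	/* head as published by the firmware */
};

/* Return 0 once one work-queue item fits, -ETIMEDOUT otherwise. */
static int wq_check_space(struct wq_client *gc, uint32_t item_size)
{
	int timeout_counter = 200;

	/* Fast path: the head cached at the last doorbell may already
	 * show enough space, with no shared-memory access needed. */
	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= item_size)
		return 0;

	/* Slow path: re-read the head the consumer keeps advancing. */
	while (timeout_counter-- > 0) {
		gc->wq_head = *gc->shared_head;
		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= item_size)
			return 0;
		/* the real code sleeps ~1 ms between polls */
	}
	return -ETIMEDOUT;
}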
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -244,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 		db_exc.cookie = 1;
 	}
 
+	/* Finally, update the cached copy of the GuC's WQ head */
+	gc->wq_head = desc->head;
+
 	kunmap_atomic(base);
 	return ret;
 }
@@ -469,28 +472,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
 			     sizeof(desc) * client->ctx_index);
 }
 
-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
 	struct guc_process_desc *desc;
 	void *base;
 	u32 size = sizeof(struct guc_wq_item);
 	int ret = -ETIMEDOUT, timeout_counter = 200;
 
+	if (!gc)
+		return 0;
+
+	/* Quickly return if wq space is available since last time we cache the
+	 * head position. */
+	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
+		return 0;
+
 	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
 	desc = base + gc->proc_desc_offset;
 
 	while (timeout_counter-- > 0) {
-		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
-			*offset = gc->wq_tail;
-
-			/* advance the tail for next workqueue item */
-			gc->wq_tail += size;
-			gc->wq_tail &= gc->wq_size - 1;
+		gc->wq_head = desc->head;
 
-			/* this will break the loop */
-			timeout_counter = 0;
+		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
 			ret = 0;
+			break;
 		}
 
 		if (timeout_counter)
@@ -508,12 +513,16 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 	enum intel_ring_id ring_id = rq->ring->id;
 	struct guc_wq_item *wqi;
 	void *base;
-	u32 tail, wq_len, wq_off = 0;
-	int ret;
+	u32 tail, wq_len, wq_off, space;
 
-	ret = guc_get_workqueue_space(gc, &wq_off);
-	if (ret)
-		return ret;
+	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+	if (WARN_ON(space < sizeof(struct guc_wq_item)))
+		return -ENOSPC; /* shouldn't happen */
+
+	/* postincrement WQ tail for next time */
+	wq_off = gc->wq_tail;
+	gc->wq_tail += sizeof(struct guc_wq_item);
+	gc->wq_tail &= gc->wq_size - 1;
 
 	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
 	 * should not have the case where structure wqi is across page, neither
......
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -43,6 +43,7 @@ struct i915_guc_client {
 	uint32_t wq_offset;
 	uint32_t wq_size;
 	uint32_t wq_tail;
+	uint32_t wq_head;
 
 	/* GuC submission statistics & status */
 	uint64_t submissions[I915_NUM_RINGS];
@@ -122,5 +123,6 @@ int i915_guc_submit(struct i915_guc_client *client,
 			   struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_device *dev);
 void i915_guc_submission_fini(struct drm_device *dev);
+int i915_guc_wq_check_space(struct i915_guc_client *client);
 
 #endif
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -670,6 +670,19 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 		return ret;
 	}
 
+	if (i915.enable_guc_submission) {
+		/*
+		 * Check that the GuC has space for the request before
+		 * going any further, as the i915_add_request() call
+		 * later on mustn't fail ...
+		 */
+		struct intel_guc *guc = &request->i915->guc;
+
+		ret = i915_guc_wq_check_space(guc->execbuf_client);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
......
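With the check done at request-allocation time, the later producer step can
assume the space is there. A sketch of that side, reusing the simplified
wq_client type and CIRC_SPACE macro from the sketch above (wq_take_slot is
again an illustrative stand-in, not the driver's code):

/* Consume one item's worth of space that wq_check_space() already
 * guaranteed; mirrors the WARN_ON + tail post-increment in the patch. */
static int wq_take_slot(struct wq_client *gc, uint32_t item_size,
			uint32_t *wq_off)
{
	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) < item_size)
		return -ENOSPC; /* shouldn't happen: space was pre-checked */

	*wq_off = gc->wq_tail;	/* write the new work item at this offset */
	gc->wq_tail = (gc->wq_tail + item_size) & (gc->wq_size - 1);
	return 0;
}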