diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c439461db9af75f9ed368905cf46e9b876eb155f..4364a209c6a5f6fd5ed115306bae3951833d44a5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1657,6 +1657,7 @@ struct i915_execbuffer_params {
 	struct intel_engine_cs *ring;
 	struct drm_i915_gem_object *batch_obj;
 	struct intel_context *ctx;
+	struct drm_i915_gem_request *request;
 };
 
 struct drm_i915_private {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9c9e20ace5e3352fdc6be0688bf24bfc62982701..9d53d787fcf6ef3dca9a0b4e60c9d7ca99a26ecc 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1415,7 +1415,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct i915_address_space *vm;
 	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
 	struct i915_execbuffer_params *params = &params_master;
-	struct drm_i915_gem_request *request;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 dispatch_flags;
 	int ret;
@@ -1615,7 +1614,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	ret = i915_gem_request_alloc(ring, ctx, &request);
+	ret = i915_gem_request_alloc(ring, ctx, &params->request);
 	if (ret)
 		goto err_batch_unpin;
 
@@ -1649,6 +1648,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
+	/*
+	 * If the request was created but not successfully submitted then it
+	 * must be freed again. If it was submitted then it is being tracked
+	 * on the active request list and no clean up is required here.
+	 */
+	if (ret && params->request) {
+		i915_gem_request_cancel(params->request);
+		ring->outstanding_lazy_request = NULL;
+	}
+
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err: