Commit e8a261ea authored by Chris Wilson

drm/i915: Rename request reference/unreference to get/put

Now that we derive requests from struct fence, swap over to its
nomenclature for references. It's shorter and more idiomatic across the
kernel.

s/i915_gem_request_reference/i915_gem_request_get/
s/i915_gem_request_unreference/i915_gem_request_put/
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1469005202-9659-2-git-send-email-chris@chris-wilson.co.uk
Link: http://patchwork.freedesktop.org/patch/msgid/1469017917-15134-1-git-send-email-chris@chris-wilson.co.uk
Parent c13d87ea
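The rename is mechanical: the wrappers now forward directly to the struct fence primitives, and callers keep the familiar pattern of pinning a request before dropping the lock they found it under. A minimal sketch of that pattern, condensed from the hunks below (the locking and the __i915_wait_request call are illustrative fragments, not a complete function):

	/* Acquire: bump the fence refcount while the request is still
	 * protected by the lock it was looked up under. */
	req = i915_gem_request_get(obj->last_write_req);
	mutex_unlock(&dev->struct_mutex);

	/* The request cannot be freed while we hold our reference. */
	ret = __i915_wait_request(req, true, NULL, NULL);

	/* Release: fence_put() frees the request once the count drops to zero. */
	i915_gem_request_put(req);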
@@ -1422,7 +1422,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 		if (req == NULL)
 			return 0;
 
-		requests[n++] = i915_gem_request_reference(req);
+		requests[n++] = i915_gem_request_get(req);
 	} else {
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			struct drm_i915_gem_request *req;
@@ -1431,7 +1431,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 			if (req == NULL)
 				continue;
 
-			requests[n++] = i915_gem_request_reference(req);
+			requests[n++] = i915_gem_request_get(req);
 		}
 	}
 
@@ -1444,7 +1444,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
 			i915_gem_object_retire_request(obj, requests[i]);
-		i915_gem_request_unreference(requests[i]);
+		i915_gem_request_put(requests[i]);
 	}
 
 	return ret;
@@ -2820,7 +2820,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		if (obj->last_read_req[i] == NULL)
 			continue;
 
-		req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
+		req[n++] = i915_gem_request_get(obj->last_read_req[i]);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -2830,7 +2830,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 			ret = __i915_wait_request(req[i], true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 						  to_rps_client(file));
-		i915_gem_request_unreference(req[i]);
+		i915_gem_request_put(req[i]);
 	}
 
 	return ret;
@@ -3845,14 +3845,14 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		target = request;
 	}
 	if (target)
-		i915_gem_request_reference(target);
+		i915_gem_request_get(target);
 	spin_unlock(&file_priv->mm.lock);
 
 	if (target == NULL)
 		return 0;
 
 	ret = __i915_wait_request(target, true, NULL, NULL);
-	i915_gem_request_unreference(target);
+	i915_gem_request_put(target);
 
 	return ret;
 }
......
@@ -181,7 +181,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	}
 
 	i915_gem_context_unreference(request->ctx);
-	i915_gem_request_unreference(request);
+	i915_gem_request_put(request);
 }
 
 void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
......
@@ -174,13 +174,13 @@ to_request(struct fence *fence)
 }
 
 static inline struct drm_i915_gem_request *
-i915_gem_request_reference(struct drm_i915_gem_request *req)
+i915_gem_request_get(struct drm_i915_gem_request *req)
 {
 	return to_request(fence_get(&req->fence));
 }
 
 static inline void
-i915_gem_request_unreference(struct drm_i915_gem_request *req)
+i915_gem_request_put(struct drm_i915_gem_request *req)
 {
 	fence_put(&req->fence);
 }
@@ -189,10 +189,10 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
 					   struct drm_i915_gem_request *src)
 {
 	if (src)
-		i915_gem_request_reference(src);
+		i915_gem_request_get(src);
 
 	if (*pdst)
-		i915_gem_request_unreference(*pdst);
+		i915_gem_request_put(*pdst);
 
 	*pdst = src;
 }
......
@@ -78,7 +78,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 		if (req == NULL)
 			continue;
 
-		requests[n++] = i915_gem_request_reference(req);
+		requests[n++] = i915_gem_request_get(req);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -89,7 +89,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 	mutex_lock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++)
-		i915_gem_request_unreference(requests[i]);
+		i915_gem_request_put(requests[i]);
 }
 
 static void cancel_userptr(struct work_struct *work)
......
@@ -453,7 +453,7 @@ static int intel_breadcrumbs_signaler(void *arg)
 			rb_erase(&request->signaling.node, &b->signals);
 			spin_unlock(&b->lock);
 
-			i915_gem_request_unreference(request);
+			i915_gem_request_put(request);
 		} else {
 			if (kthread_should_stop())
 				break;
@@ -484,7 +484,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 
 	request->signaling.wait.tsk = b->signaler;
 	request->signaling.wait.seqno = request->fence.seqno;
-	i915_gem_request_reference(request);
+	i915_gem_request_get(request);
 
 	/* First add ourselves into the list of waiters, but register our
 	 * bottom-half as the signaller thread. As per usual, only the oldest
......
@@ -10954,11 +10954,10 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
-	if (work->flip_queued_req)
-		i915_gem_request_assign(&work->flip_queued_req, NULL);
 	mutex_unlock(&dev->struct_mutex);
 
+	i915_gem_request_put(work->flip_queued_req);
+
 	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
 	intel_fbc_post_update(crtc);
 	drm_framebuffer_unreference(work->old_fb);
......
@@ -441,7 +441,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
 			 * will update tail past first request's workload */
 			cursor->elsp_submitted = req0->elsp_submitted;
 			list_del(&req0->execlist_link);
-			i915_gem_request_unreference(req0);
+			i915_gem_request_put(req0);
 			req0 = cursor;
 		} else {
 			if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
@@ -514,7 +514,7 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
 	execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
 
 	list_del(&head_req->execlist_link);
-	i915_gem_request_unreference(head_req);
+	i915_gem_request_put(head_req);
 
 	return 1;
 }
@@ -632,11 +632,11 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 			WARN(tail_req->elsp_submitted != 0,
 				"More than 2 already-submitted reqs queued\n");
 			list_del(&tail_req->execlist_link);
-			i915_gem_request_unreference(tail_req);
+			i915_gem_request_put(tail_req);
 		}
 	}
 
-	i915_gem_request_reference(request);
+	i915_gem_request_get(request);
 	list_add_tail(&request->execlist_link, &engine->execlist_queue);
 	request->ctx_hw_id = request->ctx->hw_id;
 	if (num_elements == 0)
@@ -904,7 +904,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
 
 	list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
 		list_del(&req->execlist_link);
-		i915_gem_request_unreference(req);
+		i915_gem_request_put(req);
 	}
 }
......
@@ -7775,7 +7775,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 	if (!i915_gem_request_completed(req))
 		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
-	i915_gem_request_unreference(req);
+	i915_gem_request_put(req);
 	kfree(boost);
 }
@@ -7793,8 +7793,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
 	if (boost == NULL)
 		return;
 
-	i915_gem_request_reference(req);
-	boost->req = req;
+	boost->req = i915_gem_request_get(req);
 
 	INIT_WORK(&boost->work, __intel_rps_boost_work);
 	queue_work(req->i915->wq, &boost->work);
......