Commit e09e903a authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915/selftests: Prepare execlists and lrc selftests for obj->mm.lock removal

Convert normal functions to unlocked versions where needed.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-54-maarten.lankhorst@linux.intel.com
Parent 17b7ab92
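The change is mechanical: each selftest call site that mapped a GEM object with i915_gem_object_pin_map(), which expects the caller to handle object locking once obj->mm.lock is gone, now uses i915_gem_object_pin_map_unlocked(), which takes and releases the object lock internally. A minimal sketch of the resulting call-site pattern, assuming the i915 GEM API as used in the hunks below (the wrapper function and its name are illustrative, not part of the patch):

/*
 * Illustrative sketch only. It mirrors what the converted selftests do:
 * map an object for CPU access without the caller taking any object
 * lock, because the _unlocked helper handles the locking itself.
 */
static int example_map_and_write(struct drm_i915_gem_object *obj)
{
        u32 *vaddr;

        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        *vaddr = 0;     /* write through the write-combined CPU mapping */

        i915_gem_object_unpin_map(obj); /* release the mapping */
        return 0;
}

Pushing the locking into the helper keeps the conversion to a one-line change per call site, which is what every hunk below shows.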
@@ -989,7 +989,7 @@ static int live_timeslice_preempt(void *arg)
                 goto err_obj;
         }
 
-        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
         if (IS_ERR(vaddr)) {
                 err = PTR_ERR(vaddr);
                 goto err_obj;
@@ -1297,7 +1297,7 @@ static int live_timeslice_queue(void *arg)
                 goto err_obj;
         }
 
-        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
         if (IS_ERR(vaddr)) {
                 err = PTR_ERR(vaddr);
                 goto err_obj;
@@ -1544,7 +1544,7 @@ static int live_busywait_preempt(void *arg)
                 goto err_ctx_lo;
         }
 
-        map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+        map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
         if (IS_ERR(map)) {
                 err = PTR_ERR(map);
                 goto err_obj;
@@ -2714,7 +2714,7 @@ static int create_gang(struct intel_engine_cs *engine,
         if (err)
                 goto err_obj;
 
-        cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+        cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
         if (IS_ERR(cs)) {
                 err = PTR_ERR(cs);
                 goto err_obj;
@@ -2997,7 +2997,7 @@ static int live_preempt_gang(void *arg)
                  * it will terminate the next lowest spinner until there
                  * are no more spinners and the gang is complete.
                  */
-                cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+                cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
                 if (!IS_ERR(cs)) {
                         *cs = 0;
                         i915_gem_object_unpin_map(rq->batch->obj);
@@ -3062,7 +3062,7 @@ create_gpr_user(struct intel_engine_cs *engine,
                 return ERR_PTR(err);
         }
 
-        cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+        cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
         if (IS_ERR(cs)) {
                 i915_vma_put(vma);
                 return ERR_CAST(cs);
@@ -3269,7 +3269,7 @@ static int live_preempt_user(void *arg)
         if (IS_ERR(global))
                 return PTR_ERR(global);
 
-        result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+        result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
         if (IS_ERR(result)) {
                 i915_vma_unpin_and_release(&global, 0);
                 return PTR_ERR(result);
@@ -3658,7 +3658,7 @@ static int live_preempt_smoke(void *arg)
                 goto err_free;
         }
 
-        cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+        cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
         if (IS_ERR(cs)) {
                 err = PTR_ERR(cs);
                 goto err_batch;
@@ -4263,7 +4263,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
                 goto out_end;
         }
 
-        cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+        cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
         if (IS_ERR(cs)) {
                 err = PTR_ERR(cs);
                 goto out_end;
...
@@ -627,7 +627,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
                 goto err_rq;
         }
 
-        cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+        cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
         if (IS_ERR(cs)) {
                 err = PTR_ERR(cs);
                 goto err_rq;
@@ -921,7 +921,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
         if (IS_ERR(batch))
                 return batch;
 
-        cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+        cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
         if (IS_ERR(cs)) {
                 i915_vma_put(batch);
                 return ERR_CAST(cs);
@@ -1085,7 +1085,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
         if (IS_ERR(batch))
                 return batch;
 
-        cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+        cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
         if (IS_ERR(cs)) {
                 i915_vma_put(batch);
                 return ERR_CAST(cs);
@@ -1199,29 +1199,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
         u32 *defaults;
         int err = 0;
 
-        A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+        A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
         if (IS_ERR(A[0]))
                 return PTR_ERR(A[0]);
 
-        A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+        A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
         if (IS_ERR(A[1])) {
                 err = PTR_ERR(A[1]);
                 goto err_A0;
         }
 
-        B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+        B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
         if (IS_ERR(B[0])) {
                 err = PTR_ERR(B[0]);
                 goto err_A1;
         }
 
-        B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+        B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
         if (IS_ERR(B[1])) {
                 err = PTR_ERR(B[1]);
                 goto err_B0;
         }
 
-        lrc = i915_gem_object_pin_map(ce->state->obj,
-                                      i915_coherent_map_type(engine->i915));
+        lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
+                                               i915_coherent_map_type(engine->i915));
         if (IS_ERR(lrc)) {
                 err = PTR_ERR(lrc);
...