提交 4f16749f 编写于 作者: Andrzej Hajda 提交者: Andi Shyti

drm/i915/selftest: use igt_vma_move_to_active_unlocked if possible

Helper replaces common sequence of calls.
Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221213121951.1515023-2-andrzej.hajda@intel.com
上级 f350c74f
@@ -1551,9 +1551,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 		goto err_unpin;
 	}
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 	if (err)
 		goto skip_request;
@@ -1686,9 +1684,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 		goto err_unpin;
 	}
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto skip_request;
......
@@ -130,15 +130,11 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 		goto err_batch;
 	}
 
-	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
+	err = igt_vma_move_to_active_unlocked(batch, rq, 0);
 	if (err)
 		goto skip_request;
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto skip_request;
......
@@ -2763,13 +2763,11 @@ static int create_gang(struct intel_engine_cs *engine,
 	rq->batch = i915_vma_get(vma);
 	i915_request_get(rq);
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 	if (!err)
 		err = rq->engine->emit_bb_start(rq,
 						i915_vma_offset(vma),
 						PAGE_SIZE, 0);
-	i915_vma_unlock(vma);
 	i915_request_add(rq);
 	if (err)
 		goto err_rq;
@@ -3177,9 +3175,7 @@ create_gpr_client(struct intel_engine_cs *engine,
 		goto out_batch;
 	}
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 
 	i915_vma_lock(batch);
 	if (!err)
@@ -3514,13 +3510,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
 	}
 
 	if (vma) {
-		i915_vma_lock(vma);
-		err = i915_vma_move_to_active(vma, rq, 0);
+		err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 		if (!err)
 			err = rq->engine->emit_bb_start(rq,
 							i915_vma_offset(vma),
 							PAGE_SIZE, 0);
-		i915_vma_unlock(vma);
 	}
 
 	i915_request_add(rq);
......
@@ -599,9 +599,7 @@ __gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
 		*cs++ = 0;
 	}
 
-	i915_vma_lock(scratch);
-	err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(scratch);
+	err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);
 
 	i915_request_get(rq);
 	i915_request_add(rq);
......
@@ -228,9 +228,7 @@ static int check_mocs_engine(struct live_mocs *arg,
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 
 	/* Read the mocs tables back using SRM */
 	offset = i915_ggtt_offset(vma);
......
@@ -138,9 +138,7 @@ read_nonprivs(struct intel_context *ce)
 		goto err_pin;
 	}
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto err_req;
@@ -853,9 +851,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	i915_vma_lock(results);
-	err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(results);
+	err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto err_req;
@@ -935,9 +931,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
 		goto err_request;
 	}
 
-	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
+	err = igt_vma_move_to_active_unlocked(batch, rq, 0);
 	if (err)
 		goto err_request;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册