Commit 75b974a8 authored by Chris Wilson

drm/i915/selftests: Teach igt_gpu_fill_dw() to take intel_context

Avoid having to pass around (ctx, engine) everywhere by passing the
actual intel_context we intend to use. Today we preach this lesson to
igt_gpu_fill_dw and its callers' callers.

The immediate benefit for the GEM selftests is that we aim to use the
GEM context as the control, the source of the engines on which to test
the GEM context.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190823235141.31799-1-chris@chris-wilson.co.uk
Parent 77715906
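
In short, the helper's contract changes from a (ctx, engine) pair to a single intel_context, which carries both the engine and the vm. A condensed view of the signature change, taken from the igt_gem_utils.h hunk at the end of this diff:

	/* Before: the vm had to be re-derived from ctx and engine. */
	int igt_gpu_fill_dw(struct i915_vma *vma,
			    struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, unsigned long count, u32 val);

	/* After: ce->engine and ce->vm supply what (ctx, engine) used to. */
	int igt_gpu_fill_dw(struct intel_context *ce,
			    struct i915_vma *vma, u64 offset,
			    unsigned long count, u32 val);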
@@ -879,9 +879,8 @@ static int igt_mock_ppgtt_64K(void *arg)
 	return err;
 }
 
-static int gpu_write(struct i915_vma *vma,
-		     struct i915_gem_context *ctx,
-		     struct intel_engine_cs *engine,
+static int gpu_write(struct intel_context *ce,
+		     struct i915_vma *vma,
 		     u32 dw,
 		     u32 val)
 {
@@ -893,7 +892,7 @@ static int gpu_write(struct i915_vma *vma,
 	if (err)
 		return err;
 
-	return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
 }
@@ -929,18 +928,16 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	return err;
 }
 
-static int __igt_write_huge(struct i915_gem_context *ctx,
-			    struct intel_engine_cs *engine,
+static int __igt_write_huge(struct intel_context *ce,
 			    struct drm_i915_gem_object *obj,
 			    u64 size, u64 offset,
 			    u32 dword, u32 val)
 {
-	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	struct i915_vma *vma;
 	int err;
 
-	vma = i915_vma_instance(obj, vm, NULL);
+	vma = i915_vma_instance(obj, ce->vm, NULL);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
@@ -954,7 +951,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 		 * The ggtt may have some pages reserved so
 		 * refrain from erroring out.
 		 */
-		if (err == -ENOSPC && i915_is_ggtt(vm))
+		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
 			err = 0;
 
 		goto out_vma_close;
@@ -964,7 +961,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 	if (err)
 		goto out_vma_unpin;
 
-	err = gpu_write(vma, ctx, engine, dword, val);
+	err = gpu_write(ce, vma, dword, val);
 	if (err) {
 		pr_err("gpu-write failed at offset=%llx\n", offset);
 		goto out_vma_unpin;
@@ -987,14 +984,13 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
-	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
-	struct intel_engine_cs *engine;
+	struct i915_gem_engines *engines;
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
 	I915_RND_STATE(prng);
 	IGT_TIMEOUT(end_time);
 	unsigned int max_page_size;
-	unsigned int id;
+	unsigned int count;
 	u64 max;
 	u64 num;
 	u64 size;
@@ -1008,19 +1004,18 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
 		size = round_up(size, I915_GTT_PAGE_SIZE_2M);
 
-	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
-	max = div_u64((vm->total - size), max_page_size);
-
 	n = 0;
-	for_each_engine(engine, i915, id) {
-		if (!intel_engine_can_store_dword(engine)) {
-			pr_info("store-dword-imm not supported on engine=%u\n",
-				id);
+	count = 0;
+	max = U64_MAX;
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		count++;
+		if (!intel_engine_can_store_dword(ce->engine))
 			continue;
-		}
-		engines[n++] = engine;
-	}
+
+		max = min(max, ce->vm->total);
+		n++;
+	}
+	i915_gem_context_unlock_engines(ctx);
 	if (!n)
 		return 0;
@@ -1029,23 +1024,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	 * randomized order, lets also make feeding to the same engine a few
 	 * times in succession a possibility by enlarging the permutation array.
 	 */
-	order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+	order = i915_random_order(count * count, &prng);
 	if (!order)
 		return -ENOMEM;
 
+	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+	max = div_u64(max - size, max_page_size);
+
 	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * timeout -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
 	i = 0;
+	engines = i915_gem_context_lock_engines(ctx);
 	for_each_prime_number_from(num, 0, max) {
 		u64 offset_low = num * max_page_size;
 		u64 offset_high = (max - num) * max_page_size;
 		u32 dword = offset_in_page(num) / 4;
+		struct intel_context *ce;
 
-		engine = engines[order[i] % n];
-		i = (i + 1) % (n * I915_NUM_ENGINES);
+		ce = engines->engines[order[i] % engines->num_engines];
+		i = (i + 1) % (count * count);
+
+		if (!ce || !intel_engine_can_store_dword(ce->engine))
+			continue;
 
 		/*
		 * In order to utilize 64K pages we need to both pad the vma
@@ -1057,22 +1059,23 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);
 
-		err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
 		if (err)
 			break;
 
-		err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
 		if (err)
 			break;
 
 		if (igt_timeout(end_time,
-				"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
-				__func__, engine->id, offset_low, offset_high,
+				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
 			break;
 	}
+	i915_gem_context_unlock_engines(ctx);
 
 	kfree(order);
@@ -1316,10 +1319,10 @@ static int igt_ppgtt_pin_update(void *arg)
 	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
 	struct i915_address_space *vm = ctx->vm;
 	struct drm_i915_gem_object *obj;
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	unsigned int n;
 	int first, last;
 	int err;
@@ -1419,14 +1422,18 @@ static int igt_ppgtt_pin_update(void *arg)
 	 */
 
 	n = 0;
-	for_each_engine(engine, dev_priv, id) {
-		if (!intel_engine_can_store_dword(engine))
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		if (!intel_engine_can_store_dword(ce->engine))
 			continue;
 
-		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
 		if (err)
-			goto out_unpin;
+			break;
 	}
+	i915_gem_context_unlock_engines(ctx);
+	if (err)
+		goto out_unpin;
 
 	while (n--) {
 		err = cpu_check(obj, n, 0xdeadbeaf);
 		if (err)
@@ -1507,8 +1514,8 @@ static int igt_shrink_thp(void *arg)
 	struct drm_i915_private *i915 = ctx->i915;
 	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER;
 	unsigned int n;
@@ -1548,16 +1555,19 @@ static int igt_shrink_thp(void *arg)
 		goto out_unpin;
 
 	n = 0;
-	for_each_engine(engine, i915, id) {
-		if (!intel_engine_can_store_dword(engine))
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		if (!intel_engine_can_store_dword(ce->engine))
 			continue;
 
-		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
 		if (err)
-			goto out_unpin;
+			break;
 	}
+	i915_gem_context_unlock_engines(ctx);
 
 	i915_vma_unpin(vma);
+	if (err)
+		goto out_close;
 
 	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
@@ -1583,10 +1593,9 @@ static int igt_shrink_thp(void *arg)
 	while (n--) {
 		err = cpu_check(obj, n, 0xdeadbeaf);
 		if (err)
-			goto out_unpin;
+			break;
 	}
 
-out_unpin:
-	i915_vma_unpin(vma);
 out_close:
......
@@ -166,19 +166,17 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
 	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
 }
 
-static int gpu_fill(struct drm_i915_gem_object *obj,
-		    struct i915_gem_context *ctx,
-		    struct intel_engine_cs *engine,
+static int gpu_fill(struct intel_context *ce,
+		    struct drm_i915_gem_object *obj,
 		    unsigned int dw)
 {
-	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
 	struct i915_vma *vma;
 	int err;
 
-	GEM_BUG_ON(obj->base.size > vm->total);
-	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+	GEM_BUG_ON(obj->base.size > ce->vm->total);
+	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
 
-	vma = i915_vma_instance(obj, vm, NULL);
+	vma = i915_vma_instance(obj, ce->vm, NULL);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
@@ -200,9 +198,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
-	err = igt_gpu_fill_dw(vma,
-			      ctx,
-			      engine,
+	err = igt_gpu_fill_dw(ce, vma,
			      (dw * real_page_count(obj)) << PAGE_SHIFT |
			      (dw * sizeof(u32)),
			      real_page_count(obj),
@@ -305,22 +301,21 @@ static int file_add_object(struct drm_file *file,
 }
 
 static struct drm_i915_gem_object *
-create_test_object(struct i915_gem_context *ctx,
+create_test_object(struct i915_address_space *vm,
		   struct drm_file *file,
		   struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
 	u64 size;
 	int err;
 
 	/* Keep in GEM's good graces */
-	i915_retire_requests(ctx->i915);
+	i915_retire_requests(vm->i915);
 
 	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
 	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
 
-	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
+	obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
 	if (IS_ERR(obj))
 		return obj;
@@ -393,6 +388,7 @@ static int igt_ctx_exec(void *arg)
 		dw = 0;
 		while (!time_after(jiffies, end_time)) {
 			struct i915_gem_context *ctx;
+			struct intel_context *ce;
 
 			ctx = live_context(i915, file);
 			if (IS_ERR(ctx)) {
@@ -400,15 +396,20 @@ static int igt_ctx_exec(void *arg)
 				goto out_unlock;
 			}
 
+			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+
 			if (!obj) {
-				obj = create_test_object(ctx, file, &objects);
+				obj = create_test_object(ce->vm, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
+					intel_context_put(ce);
 					goto out_unlock;
 				}
 			}
 
-			err = gpu_fill(obj, ctx, engine, dw);
+			err = gpu_fill(ce, obj, dw);
+			intel_context_put(ce);
+
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
@@ -509,6 +510,7 @@ static int igt_shared_ctx_exec(void *arg)
 		ncontexts = 0;
 		while (!time_after(jiffies, end_time)) {
 			struct i915_gem_context *ctx;
+			struct intel_context *ce;
 
 			ctx = kernel_context(i915);
 			if (IS_ERR(ctx)) {
@@ -518,22 +520,26 @@ static int igt_shared_ctx_exec(void *arg)
 
 			__assign_ppgtt(ctx, parent->vm);
 
+			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+
 			if (!obj) {
-				obj = create_test_object(parent, file, &objects);
+				obj = create_test_object(parent->vm, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
+					intel_context_put(ce);
 					kernel_context_close(ctx);
 					goto out_test;
 				}
 			}
 
-			err = gpu_fill(obj, ctx, engine, dw);
+			err = gpu_fill(ce, obj, dw);
+
+			intel_context_put(ce);
+			kernel_context_close(ctx);
+
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
-				kernel_context_close(ctx);
 				goto out_test;
 			}
@@ -544,8 +550,6 @@ static int igt_shared_ctx_exec(void *arg)
 			ndwords++;
 			ncontexts++;
-
-			kernel_context_close(ctx);
 		}
 
 		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);
@@ -604,6 +608,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
 	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
+	intel_gt_chipset_flush(vma->vm->gt);
+
 	vma = i915_vma_instance(obj, vma->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -1082,17 +1088,19 @@ static int igt_ctx_readonly(void *arg)
 	ndwords = 0;
 	dw = 0;
 	while (!time_after(jiffies, end_time)) {
-		struct intel_engine_cs *engine;
-		unsigned int id;
+		struct i915_gem_engines_iter it;
+		struct intel_context *ce;
 
-		for_each_engine(engine, i915, id) {
-			if (!intel_engine_can_store_dword(engine))
+		for_each_gem_engine(ce,
+				    i915_gem_context_lock_engines(ctx), it) {
+			if (!intel_engine_can_store_dword(ce->engine))
 				continue;
 
 			if (!obj) {
-				obj = create_test_object(ctx, file, &objects);
+				obj = create_test_object(ce->vm, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
+					i915_gem_context_unlock_engines(ctx);
 					goto out_unlock;
 				}
 			}
@@ -1100,12 +1108,13 @@ static int igt_ctx_readonly(void *arg)
 				i915_gem_object_set_readonly(obj);
 			}
 
-			err = gpu_fill(obj, ctx, engine, dw);
+			err = gpu_fill(ce, obj, dw);
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
-				       engine->name, ctx->hw_id,
+				       ce->engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
+				i915_gem_context_unlock_engines(ctx);
 				goto out_unlock;
 			}
@@ -1115,6 +1124,7 @@ static int igt_ctx_readonly(void *arg)
 			}
 			ndwords++;
 		}
+		i915_gem_context_unlock_engines(ctx);
 	}
 	pr_info("Submitted %lu dwords (across %u engines)\n",
		ndwords, RUNTIME_INFO(i915)->num_engines);
@@ -1197,6 +1207,8 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
+	intel_gt_chipset_flush(engine->gt);
+
 	vma = i915_vma_instance(obj, ctx->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -1296,6 +1308,8 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
+	intel_gt_chipset_flush(engine->gt);
+
 	vma = i915_vma_instance(obj, ctx->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
......
@@ -9,6 +9,7 @@
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
+#include "gt/intel_gt.h"
 
 #include "i915_vma.h"
 #include "i915_drv.h"
@@ -84,6 +85,8 @@ igt_emit_store_dw(struct i915_vma *vma,
 	*cmd = MI_BATCH_BUFFER_END;
 	i915_gem_object_unpin_map(obj);
 
+	intel_gt_chipset_flush(vma->vm->gt);
+
 	vma = i915_vma_instance(obj, vma->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -101,40 +104,35 @@ igt_emit_store_dw(struct i915_vma *vma,
 	return ERR_PTR(err);
 }
 
-int igt_gpu_fill_dw(struct i915_vma *vma,
-		    struct i915_gem_context *ctx,
-		    struct intel_engine_cs *engine,
-		    u64 offset,
-		    unsigned long count,
-		    u32 val)
+int igt_gpu_fill_dw(struct intel_context *ce,
+		    struct i915_vma *vma, u64 offset,
+		    unsigned long count, u32 val)
 {
-	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	unsigned int flags;
 	int err;
 
-	GEM_BUG_ON(vma->size > vm->total);
-	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 
 	batch = igt_emit_store_dw(vma, offset, count, val);
 	if (IS_ERR(batch))
 		return PTR_ERR(batch);
 
-	rq = igt_request_alloc(ctx, engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_batch;
 	}
 
 	flags = 0;
-	if (INTEL_GEN(vm->i915) <= 5)
+	if (INTEL_GEN(ce->vm->i915) <= 5)
 		flags |= I915_DISPATCH_SECURE;
 
-	err = engine->emit_bb_start(rq,
-				    batch->node.start, batch->node.size,
-				    flags);
+	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					flags);
 	if (err)
 		goto err_request;
......
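The igt_gem_utils.c hunk above carries the functional core of the change: request allocation moves from the (ctx, engine)-based igt_request_alloc() to intel_context_create_request(), and the engine used for emit_bb_start is recovered from the request itself rather than passed in. Condensed from the hunk, with error handling elided:

	rq = intel_context_create_request(ce);	/* was: igt_request_alloc(ctx, engine) */
	...
	err = rq->engine->emit_bb_start(rq, batch->node.start,
					batch->node.size, flags);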
@@ -11,9 +11,11 @@
 struct i915_request;
 
 struct i915_gem_context;
-struct intel_engine_cs;
 struct i915_vma;
 
+struct intel_context;
+struct intel_engine_cs;
+
 struct i915_request *
 igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
@@ -23,11 +25,8 @@ igt_emit_store_dw(struct i915_vma *vma,
		  unsigned long count,
		  u32 val);
 
-int igt_gpu_fill_dw(struct i915_vma *vma,
-		    struct i915_gem_context *ctx,
-		    struct intel_engine_cs *engine,
-		    u64 offset,
-		    unsigned long count,
-		    u32 val);
+int igt_gpu_fill_dw(struct intel_context *ce,
+		    struct i915_vma *vma, u64 offset,
+		    unsigned long count, u32 val);
 
 #endif /* __IGT_GEM_UTILS_H__ */
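
For the callers, the resulting pattern (assembled from the huge_pages.c hunks above, not additional patch content) is to walk the GEM context's engine list and hand each intel_context straight to the helpers:

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);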