Commit f52c380a authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: Refine shadow batch buffer

1) Use the standard i915 GEM object sequence to access the shadow batch buffer (see the sketch below the commit header).
2) Manage the i915 VMA life cycle to resolve one FIXME.

v2:
- Refine code structure.
- Refine the usage of GEM APIs.
- Add the missing lock/unlock in release_shadow_batch_buffer.

Tested on my SKL NUC.
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Parent 58facf8c
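The "standard i915 GEM object sequence" referred to in item 1 boils down to the following steps. This is a condensed sketch for orientation only, not code from the patch: error handling is omitted, and obj, va and size are assumed to be set up by the surrounding code (as they are in perform_bb_shadow() and prepare_shadow_batch_buffer() below).

    /* Condensed sketch of the GEM CPU-access sequence; error handling
     * omitted, obj/va/size assumed to be provided by the caller. */
    unsigned int clflush = 0;
    void *va;

    /* Prepare the shmem backing store for a CPU write; learn whether
     * cachelines must be flushed before and/or after the access. */
    i915_gem_obj_prepare_shmem_write(obj, &clflush);

    /* Map the pages write-back and flush stale lines if required. */
    va = i915_gem_object_pin_map(obj, I915_MAP_WB);
    if (clflush & CLFLUSH_BEFORE)
            drm_clflush_virt_range(va, size);

    /* ... the CPU copies/patches the shadowed commands via va ... */

    /* Hand the buffer to the GPU: flush again if required, move the
     * object to the GTT domain and close the shmem access. */
    if (clflush & CLFLUSH_AFTER)
            drm_clflush_virt_range(va, size);
    i915_gem_object_set_to_gtt_domain(obj, false);
    i915_gem_obj_finish_shmem_access(obj);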
@@ -1645,11 +1645,10 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
-        struct intel_shadow_bb_entry *entry_obj;
         struct intel_vgpu *vgpu = s->vgpu;
+        struct intel_vgpu_shadow_bb *bb;
         unsigned long gma = 0;
         unsigned long bb_size;
-        void *dst = NULL;
         int ret = 0;
 
         /* get the start gm address of the batch buffer */
@@ -1657,51 +1656,51 @@ static int perform_bb_shadow(struct parser_exec_state *s)
         if (gma == INTEL_GVT_INVALID_ADDR)
                 return -EFAULT;
 
-        /* get the size of the batch buffer */
         ret = find_bb_size(s, &bb_size);
         if (ret)
                 return ret;
 
-        /* allocate shadow batch buffer */
-        entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
-        if (entry_obj == NULL)
+        bb = kzalloc(sizeof(*bb), GFP_KERNEL);
+        if (!bb)
                 return -ENOMEM;
 
-        entry_obj->obj =
-                i915_gem_object_create(s->vgpu->gvt->dev_priv,
-                                       roundup(bb_size, PAGE_SIZE));
-        if (IS_ERR(entry_obj->obj)) {
-                ret = PTR_ERR(entry_obj->obj);
-                goto free_entry;
+        bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
+                                         roundup(bb_size, PAGE_SIZE));
+        if (IS_ERR(bb->obj)) {
+                ret = PTR_ERR(bb->obj);
+                goto err_free_bb;
         }
-        entry_obj->len = bb_size;
-        INIT_LIST_HEAD(&entry_obj->list);
 
-        dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
-        if (IS_ERR(dst)) {
-                ret = PTR_ERR(dst);
-                goto put_obj;
-        }
+        ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+        if (ret)
+                goto err_free_obj;
 
-        ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
-        if (ret) {
-                gvt_vgpu_err("failed to set shadow batch to CPU\n");
-                goto unmap_src;
+        bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
+        if (IS_ERR(bb->va)) {
+                ret = PTR_ERR(bb->va);
+                goto err_finish_shmem_access;
         }
 
-        entry_obj->va = dst;
-        entry_obj->bb_start_cmd_va = s->ip_va;
+        if (bb->clflush & CLFLUSH_BEFORE) {
+                drm_clflush_virt_range(bb->va, bb->obj->base.size);
+                bb->clflush &= ~CLFLUSH_BEFORE;
+        }
 
-        /* copy batch buffer to shadow batch buffer*/
         ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
                               gma, gma + bb_size,
-                              dst);
+                              bb->va);
         if (ret < 0) {
                 gvt_vgpu_err("fail to copy guest ring buffer\n");
-                goto unmap_src;
+                ret = -EFAULT;
+                goto err_unmap;
         }
 
-        list_add(&entry_obj->list, &s->workload->shadow_bb);
+        INIT_LIST_HEAD(&bb->list);
+        list_add(&bb->list, &s->workload->shadow_bb);
+
+        bb->accessing = true;
+        bb->bb_start_cmd_va = s->ip_va;
 
         /*
          * ip_va saves the virtual address of the shadow batch buffer, while
          * ip_gma saves the graphics address of the original batch buffer.
@@ -1710,17 +1709,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
          * buffer's gma in pair. After all, we don't want to pin the shadow
          * buffer here (too early).
          */
-        s->ip_va = dst;
+        s->ip_va = bb->va;
         s->ip_gma = gma;
 
         return 0;
-
-unmap_src:
-        i915_gem_object_unpin_map(entry_obj->obj);
-put_obj:
-        i915_gem_object_put(entry_obj->obj);
-free_entry:
-        kfree(entry_obj);
+err_unmap:
+        i915_gem_object_unpin_map(bb->obj);
+err_finish_shmem_access:
+        i915_gem_obj_finish_shmem_access(bb->obj);
+err_free_obj:
+        i915_gem_object_put(bb->obj);
+err_free_bb:
+        kfree(bb);
         return ret;
 }
 
@@ -1762,7 +1761,6 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
                 if (ret < 0)
                         return ret;
         }
-
         return ret;
 }
......
@@ -325,31 +325,46 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
         return ret;
 }
 
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
+
 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
         struct intel_gvt *gvt = workload->vgpu->gvt;
         const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
-        struct intel_shadow_bb_entry *entry_obj;
+        struct intel_vgpu_shadow_bb *bb;
+        int ret;
 
-        /* pin the gem object to ggtt */
-        list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
-                struct i915_vma *vma;
-
-                vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
-                if (IS_ERR(vma))
-                        return PTR_ERR(vma);
-
-                /* FIXME: we are not tracking our pinned VMA leaving it
-                 * up to the core to fix up the stray pin_count upon
-                 * free.
-                 */
-
-                /* update the relocate gma with shadow batch buffer*/
-                entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
-                if (gmadr_bytes == 8)
-                        entry_obj->bb_start_cmd_va[2] = 0;
+        list_for_each_entry(bb, &workload->shadow_bb, list) {
+                bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
+                if (IS_ERR(bb->vma)) {
+                        ret = PTR_ERR(bb->vma);
+                        goto err;
+                }
+
+                /* relocate shadow batch buffer */
+                bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
+                if (gmadr_bytes == 8)
+                        bb->bb_start_cmd_va[2] = 0;
+
+                /* No one is going to touch shadow bb from now on. */
+                if (bb->clflush & CLFLUSH_AFTER) {
+                        drm_clflush_virt_range(bb->va, bb->obj->base.size);
+                        bb->clflush &= ~CLFLUSH_AFTER;
+                }
+
+                ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
+                if (ret)
+                        goto err;
+
+                i915_gem_obj_finish_shmem_access(bb->obj);
+                bb->accessing = false;
+
+                i915_vma_move_to_active(bb->vma, workload->req, 0);
         }
         return 0;
+err:
+        release_shadow_batch_buffer(workload);
+        return ret;
 }
 
 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -410,22 +425,37 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-        /* release all the shadow batch buffer */
-        if (!list_empty(&workload->shadow_bb)) {
-                struct intel_shadow_bb_entry *entry_obj =
-                        list_first_entry(&workload->shadow_bb,
-                                         struct intel_shadow_bb_entry,
-                                         list);
-                struct intel_shadow_bb_entry *temp;
+        struct intel_vgpu *vgpu = workload->vgpu;
+        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_vgpu_shadow_bb *bb, *pos;
+
+        if (list_empty(&workload->shadow_bb))
+                return;
+
+        bb = list_first_entry(&workload->shadow_bb,
+                              struct intel_vgpu_shadow_bb, list);
+
+        mutex_lock(&dev_priv->drm.struct_mutex);
 
-                list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-                                         list) {
-                        i915_gem_object_unpin_map(entry_obj->obj);
-                        i915_gem_object_put(entry_obj->obj);
-                        list_del(&entry_obj->list);
-                        kfree(entry_obj);
+        list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
+                if (bb->obj) {
+                        if (bb->accessing)
+                                i915_gem_obj_finish_shmem_access(bb->obj);
+
+                        if (bb->va && !IS_ERR(bb->va))
+                                i915_gem_object_unpin_map(bb->obj);
+
+                        if (bb->vma && !IS_ERR(bb->vma)) {
+                                i915_vma_unpin(bb->vma);
+                                i915_vma_close(bb->vma);
+                        }
+                        __i915_gem_object_release_unless_active(bb->obj);
                 }
+                list_del(&bb->list);
+                kfree(bb);
         }
+
+        mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static int prepare_workload(struct intel_vgpu_workload *workload)
......
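These scheduler changes are what resolve the old FIXME from item 2 of the commit message: the shadow batch buffer's VMA is now tracked explicitly, pinned and tied to the request in prepare_shadow_batch_buffer(), and released in release_shadow_batch_buffer() under struct_mutex. For orientation only, the pairing reduces to roughly the following (identifiers as in the patch, error handling and context omitted):

    /* prepare_shadow_batch_buffer(): pin into the GGTT, relocate the
     * MI_BATCH_BUFFER_START to the shadow copy, and keep the vma alive
     * for the request's lifetime. */
    bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
    bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
    i915_vma_move_to_active(bb->vma, workload->req, 0);

    /* release_shadow_batch_buffer(): drop the references under
     * struct_mutex once the workload is done with the shadow copy. */
    i915_vma_unpin(bb->vma);
    i915_vma_close(bb->vma);
    __i915_gem_object_release_unless_active(bb->obj);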
@@ -112,13 +112,14 @@ struct intel_vgpu_workload {
         struct intel_shadow_wa_ctx wa_ctx;
 };
 
-/* Intel shadow batch buffer is a i915 gem object */
-struct intel_shadow_bb_entry {
+struct intel_vgpu_shadow_bb {
         struct list_head list;
         struct drm_i915_gem_object *obj;
+        struct i915_vma *vma;
         void *va;
-        unsigned long len;
         u32 *bb_start_cmd_va;
+        unsigned int clflush;
+        bool accessing;
 };
 
 #define workload_q_head(vgpu, ring_id) \
......
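For reference, here is the new bookkeeping structure with the role of each field spelled out; the comments are glosses inferred from how the fields are used in the files above and are not comments from the patch itself:

    struct intel_vgpu_shadow_bb {
            struct list_head list;           /* link on workload->shadow_bb */
            struct drm_i915_gem_object *obj; /* shadow copy of the guest batch buffer */
            struct i915_vma *vma;            /* GGTT binding: pinned in prepare, closed in release */
            void *va;                        /* CPU mapping used while shadowing commands */
            u32 *bb_start_cmd_va;            /* shadowed MI_BATCH_BUFFER_START to relocate */
            unsigned int clflush;            /* CLFLUSH_BEFORE/AFTER flags from prepare_shmem_write */
            bool accessing;                  /* shmem CPU access still open and must be finished */
    };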