Commit c754936f authored by Xiaoguang Chen, committed by Zhenyu Wang

drm/i915/gvt: use kmap instead of kmap_atomic around guest memory access

kmap_atomic() does not allow sleeping while the mapping is held.
However, reading/writing guest memory through the hypervisor may
need to sleep, so use kmap() instead.
Signed-off-by: Bing Niu <bing.niu@intel.com>
Signed-off-by: Xiaoguang Chen <xiaoguang.chen@intel.com>
Signed-off-by: Jike Song <jike.song@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Parent 9baf0920
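The constraint behind this change: kmap_atomic() disables preemption for the duration of the mapping, so nothing between kmap_atomic() and kunmap_atomic() may sleep, while a hypervisor-mediated guest-memory access can block. Below is a minimal sketch of the pattern the patch adopts, assuming a hypothetical copy_from_guest() helper and demo_vgpu type standing in for the real GVT-g hypervisor interface (e.g. intel_gvt_hypervisor_read_gpa()):

/*
 * Sketch only, not from this patch: struct demo_vgpu and
 * copy_from_guest() are hypothetical stand-ins for a vGPU handle and
 * a hypervisor access that may sleep while waiting on the hypervisor.
 */
#include <linux/highmem.h>

struct demo_vgpu;
int copy_from_guest(struct demo_vgpu *vgpu, unsigned long gpa,
		    void *buf, unsigned long len);	/* may sleep */

static int read_guest_page(struct demo_vgpu *vgpu, unsigned long gpa,
			   struct page *page)
{
	void *dst;
	int ret;

	/*
	 * kmap_atomic() would disable preemption here, making the
	 * possibly-sleeping copy below illegal; kmap() keeps the
	 * mapping valid across a sleep, at the cost of a slower,
	 * globally shared mapping.
	 */
	dst = kmap(page);
	ret = copy_from_guest(vgpu, gpa, dst, PAGE_SIZE);
	kunmap(page);
	return ret;
}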
@@ -89,15 +89,15 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 		}
 
 		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
-		dst = kmap_atomic(page);
+		dst = kmap(page);
 		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
 				GTT_PAGE_SIZE);
-		kunmap_atomic(dst);
+		kunmap(page);
 		i++;
 	}
 
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap_atomic(page);
+	shadow_ring_context = kmap(page);
 
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
@@ -123,7 +123,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			sizeof(*shadow_ring_context),
 			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
-	kunmap_atomic(shadow_ring_context);
+	kunmap(page);
 	return 0;
 }
@@ -318,10 +318,10 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 		}
 
 		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
-		src = kmap_atomic(page);
+		src = kmap(page);
 		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
 				GTT_PAGE_SIZE);
-		kunmap_atomic(src);
+		kunmap(page);
 		i++;
 	}
@@ -329,7 +329,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
 
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap_atomic(page);
+	shadow_ring_context = kmap(page);
 
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -347,7 +347,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			sizeof(*shadow_ring_context),
 			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
-	kunmap_atomic(shadow_ring_context);
+	kunmap(page);
 }
 
 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)