Commit 9212b13f authored by Weinan Li, committed by Rodrigo Vivi

drm/i915/gvt: only reset execlist state of one engine during VM engine reset

Only reset the vgpu execlist state of the exact engine for which the VM
requests a reset. Once reading context status from the HWSP is enabled,
the guest KMD uses the saved CSB read pointer instead of always reading
it from MMIO. When one engine is reset, only the read pointer of that
engine is reset; the GVT-g host side needs to follow the same policy,
otherwise the VM may see a wrong CSB status after a single engine reset
completes.
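
The mechanism boils down to reinitializing execlist state only for the
engines named in the reset mask. A minimal sketch of that idea, modelled
on reset_execlist() in the GVT-g execlist code of this period
(for_each_engine_masked() and init_vgpu_execlist() exist there, but treat
the exact signatures as illustrative rather than authoritative):

    /* Sketch: reinitialize vgpu execlist state only for the engines
     * selected by engine_mask, leaving the CSB read pointers of the
     * other engines untouched. Illustrative, not the literal patch. */
    static void sketch_reset_execlist(struct intel_vgpu *vgpu,
                                      unsigned long engine_mask)
    {
            struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
            struct intel_engine_cs *engine;
            unsigned int tmp;

            for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                    init_vgpu_execlist(vgpu, engine->id);
    }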

v2: Split the refinement and the fix into separate patches, code refinement (Zhenyu)
v3: Move the active flag of the vgpu scheduler into sched_data (Zhenyu)

Cc: Fred Gao <fred.gao@intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Weinan Li <weinan.z.li@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Parent 7569a06d
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct intel_vgpu_submission *s = &vgpu->submission;
 	u32 data = *(u32 *)p_data;
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	bool enable_execlist;
@@ -1523,12 +1522,9 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		if (!enable_execlist)
 			return 0;
 
-		if (s->active)
-			return 0;
-
 		ret = intel_vgpu_select_submission_ops(vgpu,
-				ALL_ENGINES,
+				ENGINE_MASK(ring_id),
 				INTEL_VGPU_EXECLIST_SUBMISSION);
 		if (ret)
 			return ret;
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
+	bool active;
 
 	ktime_t sched_in_time;
 	ktime_t sched_out_time;
@@ -332,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
 			sched_data->period), HRTIMER_MODE_ABS);
+	vgpu_data->active = true;
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -339,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
 	list_del_init(&vgpu_data->lru_list);
+	vgpu_data->active = false;
 }
 
 static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -374,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
-	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
-	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	if (!vgpu_data->active) {
+		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	}
 }
 
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
@@ -389,6 +395,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
 	int ring_id;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+
+	if (!vgpu_data->active)
+		return;
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
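
With the active flag tracked in vgpu_sched_data, start_schedule and
stop_schedule become idempotent, so a reset path can bracket its work
with them without tracking whether the vgpu was already scheduled. A
hedged sketch of such a caller (the function below is hypothetical; only
the two scheduler entry points come from the patch):

    /* Hypothetical reset path: both calls below are now safe no-ops
     * when the scheduler is already in the requested state. */
    static void sketch_engine_reset(struct intel_vgpu *vgpu)
    {
            intel_vgpu_stop_schedule(vgpu);  /* returns early if !active */

            /* ... reset execlist state of the affected engine ... */

            intel_vgpu_start_schedule(vgpu); /* starts only if !active */
    }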
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1092,17 +1092,17 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
 		return -EINVAL;
 
-	if (s->active) {
+	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+		return -EINVAL;
+
+	if (s->active)
 		s->ops->clean(vgpu, engine_mask);
-		s->active = false;
-		gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
-				vgpu->id, s->ops->name);
-	}
 
 	if (interface == 0) {
 		s->ops = NULL;
 		s->virtual_submission_interface = 0;
-		gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+		s->active = false;
+		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
 		return 0;
 	}
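
After this change, intel_vgpu_select_submission_ops() accepts a
per-engine mask when (re)selecting execlist submission, while the new
WARN_ON still forces callers that drop the ops entirely (interface 0) to
pass ALL_ENGINES. A small sketch of the two call patterns (the wrapper
functions are hypothetical; the masks and interface IDs are from the
patch):

    /* Hypothetical helper showing the per-engine re-select pattern. */
    static int sketch_reselect_engine(struct intel_vgpu *vgpu, int ring_id)
    {
            /* Re-selecting execlist submission for one engine is now valid. */
            return intel_vgpu_select_submission_ops(vgpu,
                            ENGINE_MASK(ring_id),
                            INTEL_VGPU_EXECLIST_SUBMISSION);
    }

    /* Hypothetical helper showing ops removal: interface 0 must still
     * cover all engines, or the new WARN_ON returns -EINVAL. */
    static int sketch_remove_ops(struct intel_vgpu *vgpu)
    {
            return intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
    }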