Commit 0fac21e7 authored by Zhenyu Wang

drm/i915/gvt: properly access enabled intel_engine_cs

Switch to the new for_each_engine() helper to properly access
enabled intel_engine_cs, as the i915 core now manages the engine
list dynamically. GVT-g init still depends on the ring mask to
determine the engine list, because it runs earlier, before the
engines are set up.

Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Parent 3eec8722
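For readers unfamiliar with the helper, the pattern this commit adopts can be mocked in plain C as below: i915 keeps engines in a fixed-size array where slots for absent hardware stay NULL, so a raw index loop up to the array size would dereference NULL, while a for_each_engine()-style walk skips the holes. This is a standalone sketch, not kernel code; all names in it are illustrative.

	#include <stdio.h>
	#include <stddef.h>

	#define NUM_ENGINES 5

	struct engine { const char *name; };

	/* Walk the array, entering the body only for populated slots. */
	#define for_each_engine(e, engines, id) \
		for ((id) = 0; (id) < NUM_ENGINES; (id)++) \
			if (((e) = (engines)[(id)]) != NULL)

	int main(void)
	{
		struct engine rcs = { "rcs" }, bcs = { "bcs" }, vcs = { "vcs" };
		/* Two engines absent on this mock platform: slots stay NULL. */
		struct engine *engines[NUM_ENGINES] = { &rcs, &bcs, &vcs, NULL, NULL };
		struct engine *e;
		int id;

		for_each_engine(e, engines, id)
			printf("engine %d: %s\n", id, e->name);
		return 0;
	}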
@@ -817,10 +817,11 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 {
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
 	/* each ring has a virtual execlist engine */
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		init_vgpu_execlist(vgpu, i);
 		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
 	}
...
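Note the loop variable also changes type from int to enum intel_engine_id, which is what for_each_engine() iterates with. For reference, the enum in i915 of this era looked roughly like the following (paraphrased from memory; verify against the tree this patch targets):

	enum intel_engine_id {
		RCS = 0,	/* render */
		BCS,		/* blitter */
		VCS,		/* video */
		VCS2,		/* second video engine on some parts */
		VECS,		/* video enhancement */
	};
	#define I915_NUM_ENGINES 5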
@@ -132,12 +132,13 @@ static int new_mmio_info(struct intel_gvt *gvt,
 
 static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
 {
-	int i;
+	enum intel_engine_id id;
+	struct intel_engine_cs *engine;
 
 	reg &= ~GENMASK(11, 0);
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (gvt->dev_priv->engine[i]->mmio_base == reg)
-			return i;
+	for_each_engine(engine, gvt->dev_priv, id) {
+		if (engine->mmio_base == reg)
+			return id;
 	}
 	return -1;
 }
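Each engine's registers occupy a 4KB window starting at its mmio_base, so masking off bits 11:0 of a register offset recovers the owning engine's base. A standalone illustration of that lookup (the base addresses here are illustrative, not authoritative):

	#include <stdio.h>
	#include <stdint.h>

	#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

	static const uint32_t mmio_base[] = { 0x2000, 0x22000, 0x12000 };

	static int reg_to_ring_id(uint32_t reg)
	{
		unsigned int i;

		reg &= ~GENMASK(11, 0);	/* round down to the 4KB block base */
		for (i = 0; i < sizeof(mmio_base) / sizeof(mmio_base[0]); i++)
			if (mmio_base[i] == reg)
				return i;
		return -1;
	}

	int main(void)
	{
		printf("%d\n", reg_to_ring_id(0x2030));	/* 0: inside ring 0's window */
		printf("%d\n", reg_to_ring_id(0x5000));	/* -1: not an engine register */
		return 0;
	}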
@@ -1306,7 +1307,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 data = *(u32 *)p_data;
 	int ret;
 
-	if (WARN_ON(ring_id < 0))
+	if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
 		return -EINVAL;
 
 	execlist = &vgpu->execlist[ring_id];
...
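The widened WARN_ON matters because ring_id comes from render_mmio_to_ring_id(), which returns -1 on a miss, and the value then indexes vgpu->execlist[] directly. The condition is equivalent to this hypothetical helper (not part of the patch):

	/* Validate a ring id before using it as an array index. */
	static inline bool ring_id_is_valid(int ring_id)
	{
		return ring_id >= 0 && ring_id < I915_NUM_ENGINES;
	}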
@@ -37,9 +37,10 @@
 static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_execlist *execlist;
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		execlist = &vgpu->execlist[i];
 
 		if (!list_empty(workload_q_head(vgpu, i)))
 			return true;
@@ -51,7 +52,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
 	/* no target to schedule */
 	if (!scheduler->next_vgpu)
@@ -67,7 +69,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	scheduler->need_reschedule = true;
 
 	/* still have uncompleted workload? */
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, gvt->dev_priv, i) {
 		if (scheduler->current_workload[i]) {
 			gvt_dbg_sched("still have running workload\n");
 			return;
@@ -84,7 +86,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	scheduler->need_reschedule = false;
 
 	/* wake up workload dispatch thread */
-	for (i = 0; i < I915_NUM_ENGINES; i++)
+	for_each_engine(engine, gvt->dev_priv, i)
 		wake_up(&scheduler->waitq[i]);
 }
...
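The two loops above form a simple gate: bail out while any engine still carries a current workload, and only once all are drained wake every per-engine dispatch thread. A standalone sketch of that gating logic, with plain C stand-ins for the kernel primitives (illustrative only):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_ENGINES 3

	static void *current_workload[NUM_ENGINES];
	static bool woken[NUM_ENGINES];

	static bool try_reschedule(void)
	{
		int i;

		/* still have uncompleted workload? then back off */
		for (i = 0; i < NUM_ENGINES; i++)
			if (current_workload[i])
				return false;

		/* wake up every workload dispatch thread */
		for (i = 0; i < NUM_ENGINES; i++)
			woken[i] = true;	/* stands in for wake_up(&waitq[i]) */
		return true;
	}

	int main(void)
	{
		current_workload[1] = (void *)1;
		printf("%d\n", try_reschedule());	/* 0: engine 1 still busy */
		current_workload[1] = NULL;
		printf("%d\n", try_reschedule());	/* 1: all idle, waiters woken */
		return 0;
	}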
@@ -510,6 +510,10 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 	init_waitqueue_head(&scheduler->workload_complete_wq);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		/* check ring mask at init time */
+		if (!HAS_ENGINE(gvt->dev_priv, i))
+			continue;
+
 		init_waitqueue_head(&scheduler->waitq[i]);
 
 		param = kzalloc(sizeof(*param), GFP_KERNEL);
...
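This init path deliberately keeps the plain index loop instead of for_each_engine(): it runs before the engine array is populated, so it consults the static per-platform ring mask instead, as the commit message notes. The relevant i915 macros of this era were roughly as follows (paraphrased; check the tree this patch targets):

	/* Each engine is one bit in the platform's ring_mask. */
	#define ENGINE_MASK(id)	BIT(id)
	#define HAS_ENGINE(dev_priv, id) \
		(!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))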