diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7455214b242c8120ddc9b605cdf661b3d8ad53eb..fe7f5ae8884af0e8b4c6d7b7af537c989a17e8c6 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -138,6 +138,10 @@ struct intel_vgpu_display {
 	struct intel_vgpu_sbi sbi;
 };
 
+struct vgpu_sched_ctl {
+	int weight;
+};
+
 struct intel_vgpu {
 	struct intel_gvt *gvt;
 	int id;
@@ -160,6 +164,7 @@ struct intel_vgpu {
 	struct list_head workload_q_head[I915_NUM_ENGINES];
 	struct kmem_cache *workloads;
 	atomic_t running_workload_num;
+	ktime_t last_ctx_submit_time;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 
 	struct i915_gem_context *shadow_ctx;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0b41d19ae09a7f4ad006c590c58900e7a1422548..68600a3c46e5b6ed020d67d3253e24670f29d205 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1416,6 +1416,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
+		vgpu->last_ctx_submit_time = ktime_get();
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 		if(ret)
 			gvt_vgpu_err("fail submit workload on ring %d\n",
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c8ade8fc511d825a0b577f4756ead55d12261c25..ff19abc6f77edeb86829c6b7358753efa31ecc38 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,33 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 	return false;
 }
 
+struct vgpu_sched_data {
+	struct list_head list;
+	struct intel_vgpu *vgpu;
+
+	ktime_t sched_in_time;
+	ktime_t sched_out_time;
+	ktime_t sched_time;
+	ktime_t left_ts;
+	ktime_t allocated_ts;
+
+	struct vgpu_sched_ctl sched_ctl;
+};
+
+struct gvt_sched_data {
+	struct intel_gvt *gvt;
+	struct hrtimer timer;
+	unsigned long period;
+	struct list_head runq_head;
+};
+
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
+	struct vgpu_sched_data *vgpu_data;
+	ktime_t cur_time;
 
 	/* no target to schedule */
 	if (!scheduler->next_vgpu)
@@ -77,6 +99,14 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	gvt_dbg_sched("switch to next vgpu %d\n",
 			scheduler->next_vgpu->id);
 
+	cur_time = ktime_get();
+	if (scheduler->current_vgpu) {
+		vgpu_data = scheduler->current_vgpu->sched_data;
+		vgpu_data->sched_out_time = cur_time;
+	}
+	vgpu_data = scheduler->next_vgpu->sched_data;
+	vgpu_data->sched_in_time = cur_time;
+
 	/* switch current vgpu */
 	scheduler->current_vgpu = scheduler->next_vgpu;
 	scheduler->next_vgpu = NULL;
@@ -88,25 +118,12 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 		wake_up(&scheduler->waitq[i]);
 }
 
-struct tbs_vgpu_data {
-	struct list_head list;
-	struct intel_vgpu *vgpu;
-	/* put some per-vgpu sched stats here */
-};
-
-struct tbs_sched_data {
-	struct intel_gvt *gvt;
-	struct hrtimer timer;
-	unsigned long period;
-	struct list_head runq_head;
-};
-
 /* in nanosecond */
 #define GVT_DEFAULT_TIME_SLICE 1000000
 
-static void tbs_sched_func(struct tbs_sched_data *sched_data)
+static void tbs_sched_func(struct gvt_sched_data *sched_data)
 {
-	struct tbs_vgpu_data *vgpu_data;
+	struct vgpu_sched_data *vgpu_data;
 	struct intel_gvt *gvt = sched_data->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -130,7 +147,7 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
 		if (pos == &sched_data->runq_head)
 			continue;
 
-		vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+		vgpu_data = container_of(pos, struct vgpu_sched_data, list);
 		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
 			continue;
 
@@ -152,7 +169,7 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
 
 void intel_gvt_schedule(struct intel_gvt *gvt)
 {
-	struct tbs_sched_data *sched_data = gvt->scheduler.sched_data;
+	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
 
 	mutex_lock(&gvt->lock);
 	tbs_sched_func(sched_data);
@@ -161,9 +178,9 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 
 static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
 {
-	struct tbs_sched_data *data;
+	struct gvt_sched_data *data;
 
-	data = container_of(timer_data, struct tbs_sched_data, timer);
+	data = container_of(timer_data, struct gvt_sched_data, timer);
 
 	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
 
@@ -177,7 +194,7 @@ static int tbs_sched_init(struct intel_gvt *gvt)
 	struct intel_gvt_workload_scheduler *scheduler =
 		&gvt->scheduler;
 
-	struct tbs_sched_data *data;
+	struct gvt_sched_data *data;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -198,7 +215,7 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&gvt->scheduler;
-	struct tbs_sched_data *data = scheduler->sched_data;
+	struct gvt_sched_data *data = scheduler->sched_data;
 
 	hrtimer_cancel(&data->timer);
 
@@ -208,7 +225,7 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 {
-	struct tbs_vgpu_data *data;
+	struct vgpu_sched_data *data;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -230,8 +247,8 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
-	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
-	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
 	if (!list_empty(&vgpu_data->list))
 		return;
@@ -245,7 +262,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 {
-	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
 	list_del_init(&vgpu_data->list);
 }
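
Reviewer note, not part of the patch: the sched_in_time/sched_out_time stamps taken in try_to_schedule_next_vgpu() are the raw input for the per-vGPU accounting that the new sched_time, left_ts, allocated_ts and sched_ctl.weight fields are prepared for. A minimal sketch of how a follow-up weight-based policy could consume them is below. vgpu_update_timeslice() is a hypothetical helper name and the charging rule is an assumption about the later series, not code introduced here; it only uses fields added by this patch plus the standard ktime_add()/ktime_sub() helpers from <linux/ktime.h>.

/*
 * Hypothetical sketch: charge the GPU time a vGPU consumed in the
 * slice that just ended against its budget.  Would run when the vGPU
 * is switched out, i.e. right after sched_out_time is stamped.
 */
static void vgpu_update_timeslice(struct vgpu_sched_data *vgpu_data)
{
	ktime_t delta;

	/* wall time this vGPU actually held the hardware */
	delta = ktime_sub(vgpu_data->sched_out_time,
			  vgpu_data->sched_in_time);

	/* cumulative per-vGPU GPU time, useful as a scheduler statistic */
	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta);

	/*
	 * Consume the remaining budget; a weight-based policy would
	 * periodically refill left_ts from allocated_ts, with
	 * allocated_ts derived from sched_ctl.weight.
	 */
	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta);
}

Along the same lines, last_ctx_submit_time, stamped in elsp_mmio_write() on each full ELSP write, presumably lets the policy tell recently active vGPUs from idle ones when picking the next vGPU to run.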