diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index b84b7ca3f66f0515ef9ead2871e50cc1150685ce..894735c77f63f0dad4c9c103e4edbf2562639934 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -144,6 +144,11 @@ static int gvt_service_thread(void *data) intel_gvt_emulate_vblank(gvt); mutex_unlock(&gvt->lock); } + + if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, + (void *)&gvt->service_request)) { + intel_gvt_schedule(gvt); + } } return 0; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 6dfc48b63b718b4c4e6f5c62794db5ce279b18a4..7455214b242c8120ddc9b605cdf661b3d8ad53eb 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -249,6 +249,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915) enum { INTEL_GVT_REQUEST_EMULATE_VBLANK = 0, + INTEL_GVT_REQUEST_SCHED = 1, }; static inline void intel_gvt_request_service(struct intel_gvt *gvt, diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 34b9acdf34791c84170cd6c96203a5b84f860b77..c8ade8fc511d825a0b577f4756ead55d12261c25 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -96,17 +96,16 @@ struct tbs_vgpu_data { struct tbs_sched_data { struct intel_gvt *gvt; - struct delayed_work work; + struct hrtimer timer; unsigned long period; struct list_head runq_head; }; -#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1)) +/* in nanoseconds */ +#define GVT_DEFAULT_TIME_SLICE 1000000 -static void tbs_sched_func(struct work_struct *work) +static void tbs_sched_func(struct tbs_sched_data *sched_data) { - struct tbs_sched_data *sched_data = container_of(work, - struct tbs_sched_data, work.work); struct tbs_vgpu_data *vgpu_data; struct intel_gvt *gvt = sched_data->gvt; @@ -115,8 +114,6 @@ static void tbs_sched_func(struct work_struct *work) struct intel_vgpu *vgpu = NULL; struct list_head *pos, *head; 
- mutex_lock(&gvt->lock); - /* no vgpu or has already had a target */ if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu) goto out; @@ -151,17 +148,30 @@ static void tbs_sched_func(struct work_struct *work) scheduler->next_vgpu->id); try_to_schedule_next_vgpu(gvt); } +} - /* - * still have vgpu on runq - * or last schedule haven't finished due to running workload - */ - if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu) - schedule_delayed_work(&sched_data->work, sched_data->period); +void intel_gvt_schedule(struct intel_gvt *gvt) +{ + struct tbs_sched_data *sched_data = gvt->scheduler.sched_data; + mutex_lock(&gvt->lock); + tbs_sched_func(sched_data); mutex_unlock(&gvt->lock); } +static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) +{ + struct tbs_sched_data *data; + + data = container_of(timer_data, struct tbs_sched_data, timer); + + intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED); + + hrtimer_add_expires_ns(&data->timer, data->period); + + return HRTIMER_RESTART; +} + static int tbs_sched_init(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = @@ -174,11 +184,13 @@ static int tbs_sched_init(struct intel_gvt *gvt) return -ENOMEM; INIT_LIST_HEAD(&data->runq_head); - INIT_DELAYED_WORK(&data->work, tbs_sched_func); + hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + data->timer.function = tbs_timer_fn; data->period = GVT_DEFAULT_TIME_SLICE; data->gvt = gvt; scheduler->sched_data = data; + return 0; } @@ -188,7 +200,8 @@ static void tbs_sched_clean(struct intel_gvt *gvt) &gvt->scheduler; struct tbs_sched_data *data = scheduler->sched_data; - cancel_delayed_work(&data->work); + hrtimer_cancel(&data->timer); + kfree(data); scheduler->sched_data = NULL; } @@ -205,6 +218,7 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) INIT_LIST_HEAD(&data->list); vgpu->sched_data = data; + return 0; } @@ -223,7 +237,10 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) 
return; list_add_tail(&vgpu_data->list, &sched_data->runq_head); - schedule_delayed_work(&sched_data->work, 0); + + if (!hrtimer_active(&sched_data->timer)) + hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), + sched_data->period), HRTIMER_MODE_ABS); } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h index bb8b9097e41af382378009013682e8e8e1fb977e..ba00a5f7455fd57688f0c3292951e19b7ad66ec1 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.h +++ b/drivers/gpu/drm/i915/gvt/sched_policy.h @@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops { void (*stop_schedule)(struct intel_vgpu *vgpu); }; +void intel_gvt_schedule(struct intel_gvt *gvt); + int intel_gvt_init_sched_policy(struct intel_gvt *gvt); void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);