/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	int i;

	for (i = 0; i < 8; i++)
		ring_context->pdps[i].val = pdp[7 - i];
}

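/*
 * Copy the shadow PPGTT root pointers prepared for this workload into the
 * PDP fields of the shadow ring context, so the request executes against
 * the shadow page tables.
 */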
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
	struct drm_i915_gem_object *ctx_obj =
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	if (WARN_ON(!workload->shadow_mm))
		return;

	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
		return;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);
	set_context_pdp_root_pointer(shadow_ring_context,
			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
	kunmap(page);
}

/*
 * When populating the shadow ctx from the guest, we should not override the
 * OA related registers, so that they will not be overwritten by the guest OA
 * configs. This makes it possible to capture OA data from the host for both
 * the host and the guests.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
		u32 *reg_state, bool save)
{
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
	int i = 0;
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};

	if (workload->ring_id != RCS)
		return;

	if (save) {
		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;

			workload->flex_mmio[i] = reg_state[state_offset + 1];
		}
	} else {
		reg_state[ctx_oactxctrl] =
			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;
			u32 mmio = flex_mmio[i];

			reg_state[state_offset] = mmio;
			reg_state[state_offset + 1] = workload->flex_mmio[i];
		}
	}
}

static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

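	/*
	 * Copy the remaining guest context pages into the shadow context
	 * object, translating each guest graphics address through the vGPU
	 * GGTT; the ring context state page is populated separately below.
	 */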
	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
					      + RING_CTX_OFF(name.val),\
					      &shadow_ring_context->name.val, 4);\
		shadow_ring_context->name.val |= 0xffff << 16;\
	}

	COPY_REG_MASKED(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG
#undef COPY_REG_MASKED

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
	kunmap(page);
	return 0;
}

static inline bool is_gvt_request(struct i915_request *req)
{
	return i915_gem_context_force_single_submission(req->gem_context);
}

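/*
 * Snapshot INSTDONE and ACTHD from the hardware into the vGPU's virtual
 * registers so the guest sees up-to-date values when the context is
 * switched out.
 */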
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
	i915_reg_t reg;

	reg = RING_INSTDONE(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD_UDW(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct i915_request *req = data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;
	unsigned long flags;

	if (!is_gvt_request(req)) {
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, ring_id);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

static void shadow_context_descriptor_update(struct intel_context *ce)
{
	u64 desc = 0;

	desc = ce->lrc_desc;

	/* Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}

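/*
 * Emit the scanned guest ring buffer contents into the shadow context's
 * ring via the i915 request, so the commands that actually execute come
 * from the shadow copy rather than from guest memory.
 */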
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_request *req = workload->req;
	void *shadow_ring_buffer_va;
	u32 *cs;

	if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
		&& is_inhibit_context(req->hw_context))
		intel_vgpu_restore_inhibit_context(vgpu, req);

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size=%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
	struct intel_context *ce;
	struct i915_request *rq;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->req)
		return 0;

	/* Pin the shadow context by GVT even though the shadow context will be
	 * pinned when i915 allocates the request. That is because GVT will
	 * update the guest context from the shadow context when the workload
	 * is completed, and at that moment i915 may already have unpinned the
	 * shadow context, making the shadow_ctx pages invalid. So GVT needs to
	 * pin it itself. After updating the guest context, GVT can unpin the
	 * shadow_ctx safely.
	 */
	ce = intel_context_pin(shadow_ctx, engine);
	if (IS_ERR(ce)) {
		gvt_vgpu_err("fail to pin shadow context\n");
		return PTR_ERR(ce);
	}

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(ce);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto err_unpin;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_shadow;
	}

	rq = i915_request_alloc(engine, shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto err_shadow;
	}
	workload->req = i915_request_get(rq);

	ret = populate_shadow_context(workload);
	if (ret)
		goto err_req;

	return 0;
err_req:
	rq = fetch_and_zero(&workload->req);
	i915_request_put(rq);
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_unpin:
	intel_context_unpin(ce);
	return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_gvt *gvt = workload->vgpu->gvt;
	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_vgpu_shadow_bb *bb;
	int ret;

	list_for_each_entry(bb, &workload->shadow_bb, list) {
		/* For a privileged batch buffer that is not wa_ctx, the
		 * bb_start_cmd_va is only updated into ring_scan_buffer, not
		 * the real ring address allocated later in
		 * copy_workload_to_ring_buffer. Please note that
		 * shadow_ring_buffer_va points to the real ring buffer va
		 * in copy_workload_to_ring_buffer.
		 */

		if (bb->bb_offset)
			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
				+ bb->bb_offset;

		if (bb->ppgtt) {
			/* For a non-privileged bb, scan & shadow is only for
			 * debugging purposes, so the content of the shadow bb
			 * is the same as the original bb. Therefore, rather
			 * than switching to the shadow bb's gma address, we
			 * directly use the original batch buffer's gma
			 * address and send the original bb to the hardware
			 * directly.
			 */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}
			i915_gem_obj_finish_shmem_access(bb->obj);
			bb->accessing = false;

		} else {
			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
					NULL, 0, 0, 0);
			if (IS_ERR(bb->vma)) {
				ret = PTR_ERR(bb->vma);
				goto err;
			}

			/* relocate shadow batch buffer */
			bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
			if (gmadr_bytes == 8)
				bb->bb_start_cmd_va[2] = 0;

			/* No one is going to touch shadow bb from now on. */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}

			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
					false);
			if (ret)
				goto err;

			i915_gem_obj_finish_shmem_access(bb->obj);
			bb->accessing = false;

			ret = i915_vma_move_to_active(bb->vma,
						      workload->req,
						      0);
			if (ret)
				goto err;
		}
	}
	return 0;
err:
	release_shadow_batch_buffer(workload);
	return ret;
}

static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload =
		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
	struct i915_request *rq = workload->req;
	struct execlist_ring_context *shadow_ring_context =
		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
}

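/*
 * Pin the shadow indirect context object into the GGTT and patch the
 * shadow ring context with the resulting graphics addresses for the
 * indirect context and per-context batch buffers.
 */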
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_shadow_bb *bb, *pos;

	if (list_empty(&workload->shadow_bb))
		return;

	bb = list_first_entry(&workload->shadow_bb,
			struct intel_vgpu_shadow_bb, list);

	mutex_lock(&dev_priv->drm.struct_mutex);

	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			if (bb->accessing)
				i915_gem_obj_finish_shmem_access(bb->obj);

			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);

			if (bb->vma && !IS_ERR(bb->vma)) {
				i915_vma_unpin(bb->vma);
				i915_vma_close(bb->vma);
			}
			__i915_gem_object_release_unless_active(bb->obj);
		}
		list_del(&bb->list);
		kfree(bb);
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

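/*
 * Pin the shadow page tables and set up everything the request needs
 * (PDPs, ring buffer contents, batch buffers, wa_ctx) before it is
 * submitted to i915.
 */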
static int prepare_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ret = 0;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		return ret;
	}

	update_shadow_pdps(workload);

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = copy_workload_to_ring_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to generate request\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto err_shadow_wa_ctx;
	}

	return 0;
err_shadow_wa_ctx:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_unpin_mm(workload->shadow_mm);
	return ret;
}

static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&dev_priv->drm.struct_mutex);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = prepare_workload(workload);

out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
				ring_id, workload->req);
		i915_request_add(workload->req);
		workload->dispatched = true;
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}

static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->sched_lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have a current workload; maybe the workload dispatcher
	 * failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload.
	 * once the current workload is set, the schedule policy routines
	 * will wait until the current workload is finished when trying to
	 * schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->sched_lock);
	return workload;
}

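/*
 * Write the shadow context back into the guest context image once the
 * request has completed, so the guest observes the results of execution.
 */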
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct i915_request *rq = workload->req;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
		      workload->ctx_desc.lrca);

	context_page_num = rq->engine->context_size;
	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	unsigned int tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&s->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			intel_vgpu_destroy_workload(pos);
		}
		clear_bit(engine->id, s->shadow_ctx_desc_updated);
	}
}

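/*
 * Called from the workload thread after the request has been waited on:
 * wait for the shadow context to be scheduled out, propagate the result
 * back to the guest, release per-workload resources and wake up waiters.
 */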
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq = workload->req;
	int event;

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&gvt->sched_lock);

	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, directly complete the workload.
	 */
	if (rq) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Use -EIO as the workload status so that,
		 * when this request caused a GPU hang, we don't trigger a
		 * context switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}

		/* unpin shadow ctx as the shadow_ctx update is done */
		mutex_lock(&rq->i915->drm.struct_mutex);
		intel_context_unpin(rq->hw_context);
		mutex_unlock(&rq->i915->drm.struct_mutex);

		i915_request_put(fetch_and_zero(&workload->req));
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);
862 863 864 865 866 867

	if (!workload->status) {
		release_shadow_batch_buffer(workload);
		release_shadow_wa_ctx(&workload->wa_ctx);
	}

	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* If workload->status is not successful, it means the HW GPU
		 * hit a GPU hang or something went wrong with i915/GVT, and
		 * GVT won't inject a context switch interrupt to the guest.
		 * So this error is effectively a vGPU hang from the guest's
		 * point of view, and we should emulate a vGPU hang
		 * accordingly. If there are pending workloads which were
		 * already submitted from the guest, we should clean them up
		 * like the HW GPU does.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up later during the reset process, so doing the
		 * workload cleanup here doesn't have any impact.
		 */
		clean_workloads(vgpu, ENGINE_MASK(ring_id));
	}

	workload->complete(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->sched_lock);
	mutex_unlock(&vgpu->vgpu_lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

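/*
 * Per-engine scheduler thread: sleeps until a workload is queued for its
 * ring, then shadows, dispatches and completes workloads one at a time.
 */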
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv)
			|| IS_BROXTON(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		ret = dispatch_workload(workload);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
		if (ret && (vgpu_is_vm_unhealthy(ret)))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
				!atomic_read(&s->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
	i915_gem_context_put(s->shadow_ctx);
	kmem_cache_destroy(s->workloads);
}


/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	if (!s->active)
		return;

	clean_workloads(vgpu, engine_mask);
	s->ops->reset(vgpu, engine_mask);
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	int ret;

	s->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(s->shadow_ctx))
		return PTR_ERR(s->shadow_ctx);

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
						  sizeof(struct intel_vgpu_workload), 0,
						  SLAB_HWCACHE_ALIGN,
						  offsetof(struct intel_vgpu_workload, rb_tail),
						  sizeof_field(struct intel_vgpu_workload, rb_tail),
						  NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	for_each_engine(engine, vgpu->gvt->dev_priv, i)
		INIT_LIST_HEAD(&s->workload_q_head[i]);

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	return 0;

out_shadow_ctx:
	i915_gem_context_put(s->shadow_ctx);
	return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: engines to be configured with this interface
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when the guest configures the submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     unsigned long engine_mask,
				     unsigned int interface)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	const struct intel_vgpu_submission_ops *ops[] = {
		[INTEL_VGPU_EXECLIST_SUBMISSION] =
			&intel_vgpu_execlist_submission_ops,
	};
	int ret;

	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
		return -EINVAL;

	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
		return -EINVAL;

	if (s->active)
1166
		s->ops->clean(vgpu, engine_mask);
1167 1168 1169 1170

	if (interface == 0) {
		s->ops = NULL;
		s->virtual_submission_interface = 0;
1171 1172
		s->active = false;
		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1173 1174 1175
		return 0;
	}

1176
	ret = ops[interface]->init(vgpu, engine_mask);
1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189
	if (ret)
		return ret;

	s->ops = ops[interface];
	s->virtual_submission_interface = interface;
	s->active = true;

	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
			vgpu->id, s->ops->name);

	return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: the workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	if (workload->shadow_mm)
		intel_vgpu_mm_put(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->vgpu = vgpu;

	return workload;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

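/*
 * Resolve the guest PPGTT root pointers from the ring context and look up
 * (or create) the matching shadow mm for this workload.
 */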
static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
1250 1251
	intel_gvt_gtt_type_t root_entry_type;
	u64 pdps[GVT_RING_CTX_NR_PDPS];
1252

1253 1254 1255 1256 1257 1258 1259 1260
	switch (desc->addressing_mode) {
	case 1: /* legacy 32-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		break;
	case 3: /* legacy 64-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
		break;
	default:
1261 1262 1263 1264
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	workload->shadow_mm = mm;
	return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

#define get_last_workload(q) \
	(list_empty(q) ? NULL : container_of(q->prev, \
	struct intel_vgpu_workload, list))
/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: ring index
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = get_last_workload(q);
	struct intel_vgpu_workload *workload = NULL;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return ERR_PTR(-EINVAL);
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
		gvt_dbg_el("ctx head %x real head %lx\n", head,
				last_workload->rb_tail);
		/*
		 * cannot use guest context head pointer here,
		 * as it might not be updated at this time
		 */
		head = last_workload->rb_tail;
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	workload = alloc_workload(vgpu);
	if (IS_ERR(workload))
		return workload;

	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;

	if (ring_id == RCS) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;
		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
	}

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(s->workloads, workload);
		return ERR_PTR(ret);
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(workload_q_head(vgpu, ring_id))) {
		intel_runtime_pm_get(dev_priv);
		mutex_lock(&dev_priv->drm.struct_mutex);
		ret = intel_gvt_scan_and_shadow_workload(workload);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_put(dev_priv);
	}

	if (ret && (vgpu_is_vm_unhealthy(ret))) {
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;
}

/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
	list_add_tail(&workload->list,
		workload_q_head(workload->vgpu, workload->ring_id));
	intel_gvt_kick_schedule(workload->vgpu->gvt);
	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}