/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

#define _EL_OFFSET_STATUS       0x234
#define _EL_OFFSET_STATUS_BUF   0x370
#define _EL_OFFSET_STATUS_PTR   0x3A0

#define execlist_ring_mmio(gvt, ring_id, offset) \
	(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))

#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);

static int context_switch_events[] = {
	[RCS] = RCS_AS_CONTEXT_SWITCH,
	[BCS] = BCS_AS_CONTEXT_SWITCH,
	[VCS] = VCS_AS_CONTEXT_SWITCH,
	[VCS2] = VCS2_AS_CONTEXT_SWITCH,
	[VECS] = VECS_AS_CONTEXT_SWITCH,
};

static int ring_id_to_context_switch_event(int ring_id)
{
	if (WARN_ON(ring_id < RCS ||
		    ring_id >= ARRAY_SIZE(context_switch_events)))
		return -EINVAL;

	return context_switch_events[ring_id];
}
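
/*
 * Promote the pending ELSP write to be the running execlist: the pending
 * slot becomes the running slot and, if a context was in flight, execution
 * continues from element 0 of the new slot.
 */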

static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
{
	gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
			execlist->running_slot ?
			execlist->running_slot->index : -1,
			execlist->running_context ?
			execlist->running_context->context_id : 0,
			execlist->pending_slot ?
			execlist->pending_slot->index : -1);

	execlist->running_slot = execlist->pending_slot;
	execlist->pending_slot = NULL;
	execlist->running_context = execlist->running_context ?
		&execlist->running_slot->ctx[0] : NULL;

	gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
			execlist->running_slot ?
			execlist->running_slot->index : -1,
			execlist->running_context ?
			execlist->running_context->context_id : 0,
			execlist->pending_slot ?
			execlist->pending_slot->index : -1);
}
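
/*
 * Rebuild the vGPU's EXECLIST_STATUS register from the emulated slot
 * state: the pointer and active/valid bits follow the running slot index,
 * the context ID mirrors the running context, and queue-full is set while
 * an ELSP write is still pending.
 */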

static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct execlist_status_format status;
	int ring_id = execlist->ring_id;
	u32 status_reg = execlist_ring_mmio(vgpu->gvt,
			ring_id, _EL_OFFSET_STATUS);

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (running) {
		status.current_execlist_pointer = !!running->index;
		status.execlist_write_pointer = !running->index;
		status.execlist_0_active = status.execlist_0_valid =
			!running->index;
		status.execlist_1_active = status.execlist_1_valid =
			!!running->index;
	} else {
		status.execlist_0_active = status.execlist_0_valid = 0;
		status.execlist_1_active = status.execlist_1_valid = 0;
	}

	status.context_id = desc ? desc->context_id : 0;
	status.execlist_queue_full = !!(pending);

	vgpu_vreg(vgpu, status_reg) = status.ldw;
	vgpu_vreg(vgpu, status_reg + 4) = status.udw;

	gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
		vgpu->id, status_reg, status.ldw, status.udw);
}

static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
		struct execlist_context_status_format *status,
		bool trigger_interrupt_later)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	int ring_id = execlist->ring_id;
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 write_pointer;
	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;

	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_PTR);
	ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_BUF);

	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);

	write_pointer = ctx_status_ptr.write_ptr;
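	/*
	 * The CSB holds six entries (0-5); a write pointer of 0x7 is the
	 * "buffer empty" reset value, so the first event after reset lands
	 * in entry 0 and later events advance modulo six.
	 */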

	if (write_pointer == 0x7)
		write_pointer = 0;
	else {
		++write_pointer;
		write_pointer %= 0x6;
	}

	offset = ctx_status_buf_reg + write_pointer * 8;

	vgpu_vreg(vgpu, offset) = status->ldw;
	vgpu_vreg(vgpu, offset + 4) = status->udw;

	ctx_status_ptr.write_ptr = write_pointer;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;

	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
		vgpu->id, write_pointer, offset, status->ldw, status->udw);

	if (trigger_interrupt_later)
		return;

	intel_vgpu_trigger_virtual_event(vgpu,
			ring_id_to_context_switch_event(execlist->ring_id));
}
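
/*
 * Reflect a completed context back into the guest's execlist state. Three
 * cases are emulated: an element switch when ctx0 completes while ctx1 is
 * still valid, a context-complete/active-to-idle when the last element
 * finishes (followed by idle-to-active when another ELSP write is already
 * pending), and an error when the descriptor is not the running context.
 */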

static int emulate_execlist_ctx_schedule_out(
		struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format *ctx)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
	struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
	struct execlist_context_status_format status;

	memset(&status, 0, sizeof(status));

	gvt_dbg_el("schedule out context id %x\n", ctx->context_id);

	if (WARN_ON(!same_context(ctx, execlist->running_context))) {
		gvt_vgpu_err("schedule out context is not running context,"
				"ctx id %x running ctx id %x\n",
				ctx->context_id,
				execlist->running_context->context_id);
		return -EINVAL;
	}

	/* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
	if (valid_context(ctx1) && same_context(ctx0, ctx)) {
		gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");

		execlist->running_context = ctx1;

		emulate_execlist_status(execlist);

		status.context_complete = status.element_switch = 1;
		status.context_id = ctx->context_id;

		emulate_csb_update(execlist, &status, false);
		/*
		 * ctx1 is not valid, ctx == ctx0
		 * ctx1 is valid, ctx1 == ctx
		 *	--> last element is finished
		 * emulate:
		 *	active-to-idle if there is *no* pending execlist
		 *	context-complete if there *is* pending execlist
		 */
	} else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
			|| (valid_context(ctx1) && same_context(ctx1, ctx))) {
		gvt_dbg_el("need to switch virtual execlist slot\n");

		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.context_complete = status.active_to_idle = 1;
		status.context_id = ctx->context_id;

		if (!pending) {
			emulate_csb_update(execlist, &status, false);
		} else {
			emulate_csb_update(execlist, &status, true);

			memset(&status, 0, sizeof(status));

			status.idle_to_active = 1;
			status.context_id = 0;

			emulate_csb_update(execlist, &status, false);
		}
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
		struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	int ring_id = execlist->ring_id;
	u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS);
	struct execlist_status_format status;

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (status.execlist_queue_full) {
		gvt_vgpu_err("virtual execlist slots are full\n");
		return NULL;
	}

	return &execlist->slot[status.execlist_write_pointer];
}
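
/*
 * Emulate a guest ELSP write: with no execlist running, the new pair
 * becomes the running slot (idle-to-active); if it continues the running
 * context, it is folded in as a lite-restore + preempted event; otherwise
 * it is parked in the pending slot until the running execlist completes.
 */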

static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format ctx[2])
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *slot =
		get_next_execlist_slot(execlist);

	struct execlist_ctx_descriptor_format *ctx0, *ctx1;
	struct execlist_context_status_format status;
	struct intel_vgpu *vgpu = execlist->vgpu;

	gvt_dbg_el("emulate schedule-in\n");

	if (!slot) {
		gvt_vgpu_err("no available execlist slot\n");
		return -EINVAL;
	}

	memset(&status, 0, sizeof(status));
	memset(slot->ctx, 0, sizeof(slot->ctx));

	slot->ctx[0] = ctx[0];
	slot->ctx[1] = ctx[1];

	gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
			slot->index, ctx[0].context_id,
			ctx[1].context_id);

	/*
	 * no running execlist: promote this ELSP write to be the running
	 * execlist -> idle-to-active
	 */
	if (!running) {
		gvt_dbg_el("no current running execlist\n");

		execlist->running_slot = slot;
		execlist->pending_slot = NULL;
		execlist->running_context = &slot->ctx[0];

		gvt_dbg_el("running slot index %d running context %x\n",
				execlist->running_slot->index,
				execlist->running_context->context_id);

		emulate_execlist_status(execlist);

		status.idle_to_active = 1;
		status.context_id = 0;

		emulate_csb_update(execlist, &status, false);
		return 0;
	}

	ctx0 = &running->ctx[0];
	ctx1 = &running->ctx[1];

	gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
		running->index, ctx0->context_id, ctx1->context_id);

	/*
	 * already has a running execlist
	 *	a. running ctx1 is valid,
	 *	   ctx0 is finished, and running ctx1 == new execlist ctx[0]
	 *	b. running ctx1 is not valid,
	 *	   ctx0 == new execlist ctx[0]
	 * ----> lite-restore + preempted
	 */
	if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
		/* condition a */
		(!same_context(ctx0, execlist->running_context))) ||
			(!valid_context(ctx1) &&
			 same_context(ctx0, &slot->ctx[0]))) { /* condition b */
		gvt_dbg_el("need to switch virtual execlist slot\n");

		execlist->pending_slot = slot;
		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.lite_restore = status.preempted = 1;
		status.context_id = ctx[0].context_id;

		emulate_csb_update(execlist, &status, false);
	} else {
		gvt_dbg_el("emulate as pending slot\n");
		/*
		 * otherwise
		 * --> emulate the "pending execlist exists but no
		 *     preemption" case
		 */
		execlist->pending_slot = slot;
		emulate_execlist_status(execlist);
	}
	return 0;
}

static void free_workload(struct intel_vgpu_workload *workload)
{
	intel_vgpu_unpin_mm(workload->shadow_mm);
	intel_gvt_mm_unreference(workload->shadow_mm);
	kmem_cache_free(workload->vgpu->workloads, workload);
}
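
/*
 * The guest's four ELSP dwords form two 64-bit context descriptors;
 * descriptor i occupies data[i * 2] (low dword) and data[i * 2 + 1]
 * (high dword).
 */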

#define get_desc_from_elsp_dwords(ed, i) \
	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))

static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_shadow_bb_entry *entry_obj;

	/* pin the gem object to ggtt */
	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
		struct i915_vma *vma;

		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/* FIXME: we are not tracking our pinned VMA leaving it
		 * up to the core to fix up the stray pin_count upon
		 * free.
		 */

		/* update the relocated gma with the shadow batch buffer address */
		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
		if (gmadr_bytes == 8)
			entry_obj->bb_start_cmd_va[2] = 0;
	}
	return 0;
}
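
/*
 * Patch the bb_per_ctx and rcs_indirect_ctx pointers in the shadow ring
 * context so that they reference the shadowed copies of the guest's
 * workaround batch buffers.
 */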

static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap_atomic(page);

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

	kunmap_atomic(shadow_ring_context);
	return 0;
}
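
/*
 * Pin the shadow indirect context image into the GGTT and patch the
 * shadow ring context with the resulting address. The per-ctx batch
 * buffer address is recovered from the cacheline that follows the
 * indirect context image, which is then cleared.
 */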

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	/* release all the shadow batch buffers */
	if (!list_empty(&workload->shadow_bb)) {
		struct intel_shadow_bb_entry *entry_obj, *temp;

		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
					 list) {
			i915_gem_object_unpin_map(entry_obj->obj);
			i915_gem_object_put(entry_obj->obj);
			list_del(&entry_obj->list);
			kfree(entry_obj);
		}
	}
}
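
/*
 * Pin and flush everything a workload needs before it runs: the shadow
 * ppgtt mm, out-of-sync guest pages, post-shadow entries, the shadow
 * batch buffers and the shadow workaround context. For the workload that
 * heads an ELSP submission, the guest-visible schedule-in events are
 * emulated here as well.
 */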

static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct execlist_ctx_descriptor_format ctx[2];
	int ring_id = workload->ring_id;
	int ret;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		goto out;
	}

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (!workload->emulate_schedule_in)
		return 0;

	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);

	ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
	if (!ret)
		goto out;

	gvt_vgpu_err("fail to emulate execlist schedule in\n");
	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_unpin_mm(workload->shadow_mm);
out:
	return ret;
}
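
/*
 * Finish a workload after execution: release its shadow buffers on
 * success; clean up every queued workload when the workload failed or
 * the engine is being reset; skip schedule-out emulation when the next
 * queued workload continues the same context (lite-restore); otherwise
 * emulate the schedule-out events for the guest.
 */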

static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ring_id = workload->ring_id;
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct intel_vgpu_workload *next_workload;
	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
	bool lite_restore = false;
	int ret;

	gvt_dbg_el("complete workload %p status %d\n", workload,
			workload->status);

	if (!workload->status) {
		release_shadow_batch_buffer(workload);
		release_shadow_wa_ctx(&workload->wa_ctx);
	}
	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* If workload->status is not successful, the HW GPU hung or
		 * something went wrong in i915/GVT, and GVT will not inject
		 * a context switch interrupt into the guest, so the error
		 * appears to the guest as a vGPU hang; emulate that hang.
		 * If the guest has already submitted pending workloads,
		 * clean them up the way the HW GPU would.
		 *
		 * If an engine reset is in progress, the pending workloads
		 * will not be submitted to the HW GPU and will be cleaned
		 * up later during the reset, so cleaning them up here has
		 * no adverse impact.
		 */
		clean_workloads(vgpu, ENGINE_MASK(ring_id));
		goto out;
	}
	if (!list_empty(workload_q_head(vgpu, ring_id))) {
		struct execlist_ctx_descriptor_format *this_desc, *next_desc;

		next_workload = container_of(next,
				struct intel_vgpu_workload, list);
		this_desc = &workload->ctx_desc;
		next_desc = &next_workload->ctx_desc;

		lite_restore = same_context(this_desc, next_desc);
	}

	if (lite_restore) {
		gvt_dbg_el("next context == current - no schedule-out\n");
		free_workload(workload);
		return 0;
	}

	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
	if (ret)
		goto err;
out:
	free_workload(workload);
	return 0;
err:
	free_workload(workload);
	return ret;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)
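
/*
 * The ring context stores the PDPs from PDP3_UDW downwards, so reading
 * eight dwords from there and reversing them yields pdp[] ordered as
 * { PDP0 low, PDP0 high, PDP1 low, ... }.
 */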

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	int page_table_level;
	u32 pdp[8];

	if (desc->addressing_mode == 1) { /* legacy 32-bit */
		page_table_level = 3;
	} else if (desc->addressing_mode == 3) { /* legacy 64-bit */
		page_table_level = 4;
	} else {
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);

	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {

		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_vgpu_err("fail to create mm object.\n");
			return PTR_ERR(mm);
		}
	}
	workload->shadow_mm = mm;
	return 0;
}

#define get_last_workload(q) \
	(list_empty(q) ? NULL : container_of(q->prev, \
	struct intel_vgpu_workload, list))
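
/*
 * Build a workload from a guest context descriptor: locate the ring
 * context in guest memory, snapshot the ring-buffer registers (reusing
 * the previous tail as head when the same context is re-submitted),
 * shadow the guest page tables and queue the workload. The first
 * workload entering an empty queue is scanned and shadowed immediately,
 * as only one pre-allocated shadow buffer object exists per ring.
 */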

static int submit_context(struct intel_vgpu *vgpu, int ring_id,
		struct execlist_ctx_descriptor_format *desc,
		bool emulate_schedule_in)
{
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = get_last_workload(q);
	struct intel_vgpu_workload *workload = NULL;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return -EINVAL;
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
		gvt_dbg_el("ctx head %x real head %lx\n", head,
				last_workload->rb_tail);
		/*
		 * cannot use guest context head pointer here,
		 * as it might not be updated at this time
		 */
		head = last_workload->rb_tail;
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
	if (!workload)
		return -ENOMEM;

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->vgpu = vgpu;
	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;
	workload->prepare = prepare_execlist_workload;
	workload->complete = complete_execlist_workload;
	workload->status = -EINPROGRESS;
	workload->emulate_schedule_in = emulate_schedule_in;
	workload->shadowed = false;
	if (ring_id == RCS) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;
		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
	}

	if (emulate_schedule_in)
		workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
			emulate_schedule_in);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(vgpu->workloads, workload);
		return ret;
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(workload_q_head(vgpu, ring_id))) {
		intel_runtime_pm_get(dev_priv);
		mutex_lock(&dev_priv->drm.struct_mutex);
		intel_gvt_scan_and_shadow_workload(workload);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_put(dev_priv);
	}

	queue_workload(workload);
	return 0;
}
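
/*
 * Handle a complete guest ELSP write on one ring: validate both context
 * descriptors and submit a workload for each valid one. Only the first
 * descriptor triggers schedule-in emulation, which covers the pair.
 */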

int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct execlist_ctx_descriptor_format *desc[2];
	int i, ret;

	desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
	desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);

	if (!desc[0]->valid) {
		gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
		goto inv_desc;
	}

	for (i = 0; i < ARRAY_SIZE(desc); i++) {
		if (!desc[i]->valid)
			continue;
		if (!desc[i]->privilege_access) {
			gvt_vgpu_err("unexpected GGTT elsp submission\n");
			goto inv_desc;
		}
	}

	/* submit workload */
	for (i = 0; i < ARRAY_SIZE(desc); i++) {
		if (!desc[i]->valid)
			continue;
		ret = submit_context(vgpu, ring_id, desc[i], i == 0);
		if (ret) {
			gvt_vgpu_err("failed to submit desc %d\n", i);
			return ret;
		}
	}

	return 0;

inv_desc:
	gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
		     desc[0]->udw, desc[0]->ldw, desc[1]->udw, desc[1]->ldw);
	return -EINVAL;
}
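
/*
 * Reset the software execlist state of one ring and point the guest's
 * CSB pointers at an empty buffer (read pointer 0, write pointer 0x7).
 */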

static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 ctx_status_ptr_reg;

	memset(execlist, 0, sizeof(*execlist));

	execlist->vgpu = vgpu;
	execlist->ring_id = ring_id;
	execlist->slot[0].index = 0;
	execlist->slot[1].index = 1;

	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_PTR);

	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
	ctx_status_ptr.read_ptr = 0;
	ctx_status_ptr.write_ptr = 0x7;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
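
/*
 * Free every workload still queued (i.e. not yet submitted to hardware)
 * on the given engines and clear their shadow-context-descriptor-updated
 * bits.
 */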

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	unsigned int tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&vgpu->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			free_workload(pos);
		}

		clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
	}
}

void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	clean_workloads(vgpu, ALL_ENGINES);

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		kfree(vgpu->reserve_ring_buffer_va[i]);
		vgpu->reserve_ring_buffer_va[i] = NULL;
		vgpu->reserve_ring_buffer_size[i] = 0;
	}
}

#define RESERVE_RING_BUFFER_SIZE		((1 * PAGE_SIZE)/8)
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i)
		init_vgpu_execlist(vgpu, i);

	/* each ring has a shadow ring buffer until the vGPU is destroyed */
	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		vgpu->reserve_ring_buffer_va[i] =
			kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
		if (!vgpu->reserve_ring_buffer_va[i]) {
			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
			goto out;
		}
		vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
	}
	return 0;

out:
	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (vgpu->reserve_ring_buffer_size[i]) {
			kfree(vgpu->reserve_ring_buffer_va[i]);
			vgpu->reserve_ring_buffer_va[i] = NULL;
			vgpu->reserve_ring_buffer_size[i] = 0;
		}
	}
	return -ENOMEM;
}
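
/*
 * Per-engine reset path: drop the queued workloads and re-initialize the
 * execlist emulation state for every engine in engine_mask.
 */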

void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	unsigned int tmp;
	clean_workloads(vgpu, engine_mask);
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		init_vgpu_execlist(vgpu, engine->id);
}