/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

static void guc_init_ggtt_pin_bias(struct intel_guc *guc);

static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
	guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
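	/*
	 * Note: the top scratch register is deliberately left out of the
	 * send range; it is used for GuC-to-host messaging instead (read
	 * back as SOFT_SCRATCH(15) in intel_guc_to_host_event_handler_mmio()
	 * below), hence the "- 1" above.
	 */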

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

void intel_guc_init_early(struct intel_guc *guc)
{
	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	guc->notify = gen8_guc_raise_irq;
}
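
/*
 * Note: the nop send/handler vfuncs installed above are only placeholders;
 * the expectation is that they are swapped for the real MMIO or CT based
 * functions elsewhere in the uC init path, once communication with the GuC
 * has actually been enabled.
 */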

static int guc_init_wq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/*
	 * The GuC log buffer flush work item has to do register access to
	 * send the ack to GuC, and this work item, if not synced before
	 * suspend, can potentially get executed after the GFX device is
	 * suspended.
	 * By marking the WQ as freezable, we don't have to bother about
	 * flushing this work item from the suspend hooks; the pending
	 * work item, if any, will either be executed before the suspend
	 * or scheduled later on resume. This way the handling of work
	 * items can be kept the same between system suspend & rpm suspend.
	 */
	guc->log.relay.flush_wq =
		alloc_ordered_workqueue("i915-guc_log",
					WQ_HIGHPRI | WQ_FREEZABLE);
	if (!guc->log.relay.flush_wq) {
		DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
		return -ENOMEM;
	}

	/*
	 * Even though both sending a GuC action and adding a new workitem
	 * to a GuC workqueue are serialized (each with its own locking),
	 * since we're using multiple engines, it's possible that we're going
	 * to issue a preempt request with two (or more - each for a different
	 * engine) workitems in the GuC queue. In this situation, GuC may
	 * submit all of them, which will make us very confused.
	 * Our preemption contexts may even already be complete - before we
	 * even had the chance to send the preempt action to GuC! Rather
	 * than introducing yet another lock, we can just use an ordered
	 * workqueue to make sure we're always sending a single preemption
	 * request with a single workitem.
	 */
	if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
	    USES_GUC_SUBMISSION(dev_priv)) {
		guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
							  WQ_HIGHPRI);
		if (!guc->preempt_wq) {
			destroy_workqueue(guc->log.relay.flush_wq);
			DRM_ERROR("Couldn't allocate workqueue for GuC "
				  "preemption\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void guc_fini_wq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
	    USES_GUC_SUBMISSION(dev_priv))
		destroy_workqueue(guc->preempt_wq);

	destroy_workqueue(guc->log.relay.flush_wq);
}

int intel_guc_init_misc(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);
	int ret;

	guc_init_ggtt_pin_bias(guc);

	ret = guc_init_wq(guc);
	if (ret)
		return ret;

	intel_uc_fw_fetch(i915, &guc->fw);

	return 0;
}

void intel_guc_fini_misc(struct intel_guc *guc)
{
	intel_uc_fw_fini(&guc->fw);
	guc_fini_wq(guc);
}

static int guc_shared_data_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma);
		return PTR_ERR(vaddr);
	}

	guc->shared_data = vma;
	guc->shared_data_vaddr = vaddr;

	return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
	i915_gem_object_unpin_map(guc->shared_data->obj);
	i915_vma_unpin_and_release(&guc->shared_data);
}

int intel_guc_init(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	ret = guc_shared_data_create(guc);
	if (ret)
		goto err_fetch;
	GEM_BUG_ON(!guc->shared_data);

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_shared;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(dev_priv);

	return 0;

err_log:
	intel_guc_log_destroy(&guc->log);
err_shared:
	guc_shared_data_destroy(guc);
err_fetch:
	intel_uc_fw_fini(&guc->fw);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	i915_ggtt_disable_guc(dev_priv);
	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	guc_shared_data_destroy(guc);
	intel_uc_fw_fini(&guc->fw);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags;
	u32 ads;

	ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;

	if (!GUC_LOG_LEVEL_IS_ENABLED(level))
		flags |= GUC_LOG_DEFAULT_DISABLED;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	flags |=  GUC_CTL_VCS2_ENABLED;

	if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
		flags |= GUC_CTL_KERNEL_SUBMISSIONS;
	else
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif
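
	/*
	 * The log buffer section sizes below are encoded as (size / UNIT - 1).
	 * UNIT is 1MB when CRASH_BUFFER_SIZE is a whole number of megabytes
	 * (with FLAG telling the firmware that megabyte units are in use),
	 * and 4K otherwise.
	 */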

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
void intel_guc_init_params(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(params, 0, sizeof(params));

	/*
	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. This ARAT value is calculated by:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);

	/*
	 * All SOFT_SCRATCH registers are in the FORCEWAKE_BLITTER domain and
	 * they are power-context saved, so it's OK to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);

	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
		*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		I915_WRITE(guc_send_reg(guc, i), action[i]);

	POSTING_READ(guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(dev_priv,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);

	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
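
/*
 * A minimal usage sketch (illustrative only): callers normally build an
 * action array and go through the intel_guc_send() wrapper, which dispatches
 * via guc->send and hence reaches this function while MMIO communication is
 * in use, e.g.:
 *
 *	u32 action[] = { INTEL_GUC_ACTION_AUTHENTICATE_HUC, rsa_offset };
 *
 *	err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 *
 * intel_guc_sample_forcewake() and intel_guc_auth_huc() below follow this
 * pattern.
 */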

void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 msg, val;

	/*
	 * Sample the log buffer flush related bits and clear them out right
	 * away from the message identity register to minimize the chance of
	 * losing a flush interrupt when there are back-to-back flush
	 * interrupts.
	 * A new flush interrupt can arrive for a different log buffer type
	 * (e.g. for ISR) while the Host is handling one (for DPC). Since the
	 * same bit is used in the message register for ISR & DPC, it could
	 * happen that GuC sets the bit for the 2nd interrupt but the Host
	 * clears out the bit on handling the 1st interrupt.
	 */
	spin_lock(&guc->irq_lock);
	val = I915_READ(SOFT_SCRATCH(15));
	msg = val & guc->msg_enabled_mask;
	I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
	spin_unlock(&guc->irq_lock);

	intel_guc_to_host_process_recv_msg(guc, msg);
}

void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
{
	/* Make sure to handle only enabled messages */
	msg &= guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t. ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	u32 data[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
		intel_guc_ggtt_offset(guc, guc->shared_data)
	};

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	u32 data[7];

	GEM_BUG_ON(!guc->execbuf_client);

	data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
	data[1] = engine->guc_id;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 data[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
		intel_guc_ggtt_offset(guc, guc->shared_data)
	};

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +==============> +====================+ <== GUC_GGTT_TOP
 *     ^                |                    |
 *     |                |                    |
 *     |                |        DRAM        |
 *     |                |       Memory       |
 *     |                |                    |
 *    GuC               |                    |
 *  Address  +========> +====================+ <== WOPCM Top
 *   Space   ^          |   HW contexts RSVD |
 *     |     |          |        WOPCM       |
 *     |     |     +==> +--------------------+ <== GuC WOPCM Top
 *     |    GuC    ^    |                    |
 *     |    GGTT   |    |                    |
 *     |    Pin   GuC   |        GuC         |
 *     |    Bias WOPCM  |       WOPCM        |
 *     |     |    Size  |                    |
 *     |     |     |    |                    |
 *     v     v     v    |                    |
 *     +=====+=====+==> +====================+ <== GuC WOPCM Base
 *                      |   Non-GuC WOPCM    |
 *                      |   (HuC/Reserved)   |
 *                      +====================+ <== WOPCM Base
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is determined by WOPCM size and
 * actual GuC WOPCM size.
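 *
 * For example (numbers purely illustrative): with a total WOPCM size of 2MB
 * and a GuC WOPCM base of 512KB, guc_init_ggtt_pin_bias() below would set
 * ggtt_pin_bias to 2MB - 512KB = 1.5MB, and intel_guc_allocate_vma() would
 * then pin GuC-visible objects at GGTT offsets of at least 1.5MB.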
 */

/**
 * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
 * @guc: intel_guc structure.
 *
 * This function will calculate and initialize the ggtt_pin_bias value based on
 * overall WOPCM size and GuC WOPCM size.
 */
static void guc_init_ggtt_pin_bias(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(i915->wopcm.size < i915->wopcm.guc.base);

	guc->ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base;
}

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because
 * that range is reserved inside GuC.
 *
 * Return:	A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err;

	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
			   PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}