intel_guc.c
/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gt/intel_gt.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static void gen11_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}

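/*
 * H2G MMIO messages travel through a bank of consecutive 32-bit scratch
 * registers: message dword i lives at send_regs.base + 4 * i.
 */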
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

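	/* Accumulate the forcewake domains needed to touch every send register. */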
	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

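	/*
	 * guc->send and guc->handler start out as nop stubs that WARN if
	 * reached; the real paths are installed once firmware communication
	 * is brought up.
	 */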
	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	if (INTEL_GEN(i915) >= 11) {
		guc->notify = gen11_guc_raise_irq;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify = gen8_guc_raise_irq;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}

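/*
 * The "shared data" is a single page of memory whose GGTT offset is handed
 * to the firmware in later actions (see intel_guc_reset_engine() below).
 */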
static int guc_shared_data_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->shared_data = vma;
	guc->shared_data_vaddr = vaddr;

	return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_uc_supports_guc_submission(&guc_to_gt(guc)->uc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_uc_supports_guc_submission(&guc_to_gt(guc)->uc)) {
		u32 ctxnum, base;

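		/*
		 * The pool base is encoded in pages; CTXNUM counts stage
		 * descriptors in units of 16 (hence GUC_CTL_CTXNUM_IN16).
		 */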
		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

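	/*
	 * Buffer sizes are reported to the firmware in either 1 MiB or 4 KiB
	 * units: MiB units (plus GUC_LOG_ALLOC_IN_MEGABYTE) when the buffers
	 * are MiB-aligned, 4 KiB units otherwise. The BUILD_BUG_ONs below
	 * verify that every buffer fits its size field in the chosen unit.
	 */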
	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block into the SOFT_SCRATCH registers before
 * starting the firmware transfer. The firmware reads these values on
 * startup and they cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = guc_shared_data_create(guc);
	if (ret)
		goto err_fw;
	GEM_BUG_ON(!guc->shared_data);

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_shared;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_uc_supports_guc_submission(&gt->uc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the GuC whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_shared:
	guc_shared_data_destroy(guc);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_uc_supports_guc_submission(&gt->uc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	guc_shared_data_destroy(guc);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO-based host-to-GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

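	/*
	 * Ring the GuC: it consumes the request from the send registers and
	 * overwrites register 0 with a response dword whose TYPE field marks
	 * it as a response and whose DATA field carries the return value.
	 */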
	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

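/*
 * Process a notification received from the GuC. Only message bits enabled
 * in guc->msg_enabled_mask are honoured; anything else is ignored.
 */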
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bits 0 and 1 enable the Render and Media domains respectively */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: RSA offset w.r.t. GGTT base of the HuC vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */

	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	u32 data[7];

	GEM_BUG_ON(!guc->execbuf_client);

	data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
	data[1] = engine->guc_id;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of the GuC address space, [0, ggtt_pin_bias), is mapped to
 * the GuC WOPCM, while the upper part, [ggtt_pin_bias, GUC_GGTT_TOP), is
 * mapped to DRAM. The value of GuC ggtt_pin_bias is the GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

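	/*
	 * Bias the pin above ggtt_pin_bias so the vma lands outside the
	 * range that aliases the GuC WOPCM (see "GuC Address Space" above).
	 */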
	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}