/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

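/*
 * Raise a host-to-GuC interrupt by writing the trigger bit to the GuC send
 * interrupt register. This is installed as the default ->notify() hook in
 * intel_guc_init_early() and is used to tell the firmware that the host has
 * posted something for it to process.
 */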
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

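/*
 * MMIO messages to/from the GuC are exchanged via a block of consecutive
 * registers starting at SOFT_SCRATCH(0) (see intel_guc_init_send_regs());
 * map a message dword index onto the corresponding register.
 */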
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

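/*
 * Record the register block used for MMIO based host/GuC messaging and
 * pre-compute the forcewake domains needed to access it, so that a send can
 * take a single forcewake reference covering all of the registers.
 */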
void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
	guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
	BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

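/*
 * Early, software-only initialization of the GuC: locks, sub-component state
 * (firmware, CT, log), the default no-op send/event handlers and the gen8
 * interrupt-raising notify hook.
 */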
void intel_guc_init_early(struct intel_guc *guc)
{
	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	guc->notify = gen8_guc_raise_irq;
}

static int guc_init_wq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/*
	 * The GuC log buffer flush work item has to do register access to
	 * send the ack to GuC, so if it is not synced before suspend it
	 * can potentially get executed after the GFX device is suspended.
	 * By marking the WQ as freezable, we don't have to bother with
	 * flushing this work item from the suspend hooks; any pending
	 * work item will either be executed before the suspend or be
	 * scheduled later on resume. This way the handling of the work
	 * item can be kept the same between system suspend & rpm suspend.
	 */
	guc->log.relay.flush_wq =
		alloc_ordered_workqueue("i915-guc_log",
					WQ_HIGHPRI | WQ_FREEZABLE);
	if (!guc->log.relay.flush_wq) {
		DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
		return -ENOMEM;
	}

	/*
	 * Even though both sending a GuC action and adding a new workitem
	 * to the GuC workqueue are serialized (each with its own locking),
	 * since we're using multiple engines, it's possible that we're going
	 * to issue a preempt request with two (or more - one per engine)
	 * workitems in the GuC queue. In this situation, GuC may submit all
	 * of them, which will make us very confused.
	 * Our preemption contexts may even already be complete before we
	 * even had the chance to send the preempt action to GuC! Rather
	 * than introducing yet another lock, we can just use an ordered
	 * workqueue to make sure we're always sending a single preemption
	 * request with a single workitem.
	 */
	if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
	    USES_GUC_SUBMISSION(dev_priv)) {
		guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
							  WQ_HIGHPRI);
		if (!guc->preempt_wq) {
			destroy_workqueue(guc->log.relay.flush_wq);
			DRM_ERROR("Couldn't allocate workqueue for GuC "
				  "preemption\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void guc_fini_wq(struct intel_guc *guc)
{
	struct workqueue_struct *wq;

	wq = fetch_and_zero(&guc->preempt_wq);
	if (wq)
		destroy_workqueue(wq);

	wq = fetch_and_zero(&guc->log.relay.flush_wq);
	if (wq)
		destroy_workqueue(wq);
}

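/*
 * "Misc" init: allocate the GuC workqueues and fetch the firmware image.
 * Paired with intel_guc_fini_misc().
 */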
int intel_guc_init_misc(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);
	int ret;

	ret = guc_init_wq(guc);
	if (ret)
		return ret;

	intel_uc_fw_fetch(i915, &guc->fw);

	return 0;
}

void intel_guc_fini_misc(struct intel_guc *guc)
{
	intel_uc_fw_fini(&guc->fw);
	guc_fini_wq(guc);
}

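/*
 * Allocate a single GGTT-mapped page that is shared between the driver and
 * the GuC; its GGTT offset is passed to the firmware by actions such as
 * ENTER/EXIT_S_STATE and REQUEST_ENGINE_RESET (see below).
 */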
static int guc_shared_data_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->shared_data = vma;
	guc->shared_data_vaddr = vaddr;

	return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}

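/*
 * Allocate all GuC objects that live in the GGTT (shared data page, log
 * buffer, ADS and, where supported, the CT buffers) and enable GGTT update
 * notifications to the GuC. Undone by intel_guc_fini().
 */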
int intel_guc_init(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	ret = guc_shared_data_create(guc);
	if (ret)
		goto err_fetch;
	GEM_BUG_ON(!guc->shared_data);

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_shared;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	if (HAS_GUC_CT(dev_priv)) {
		ret = intel_guc_ct_init(&guc->ct);
		if (ret)
			goto err_ads;
	}

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(dev_priv);

	return 0;

err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_shared:
	guc_shared_data_destroy(guc);
err_fetch:
	intel_uc_fw_fini(&guc->fw);
	return ret;
}

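/* Release everything allocated by intel_guc_init(), in reverse order. */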
void intel_guc_fini(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	i915_ggtt_disable_guc(dev_priv);

	if (HAS_GUC_CT(dev_priv))
		intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	guc_shared_data_destroy(guc);
	intel_uc_fw_fini(&guc->fw);
}

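/*
 * Build the GUC_CTL_DEBUG parameter dword: the GGTT page offset of the ADS
 * plus the requested log verbosity (or the "log disabled" bits when logging
 * is not enabled).
 */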
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags;
	u32 ads;

	ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;

	if (!GUC_LOG_LEVEL_IS_ENABLED(level))
		flags |= GUC_LOG_DEFAULT_DISABLED;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

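/*
 * Build the GUC_CTL_FEATURE parameter dword; the GuC scheduler is disabled
 * unless GuC submission is in use.
 */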
static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	flags |=  GUC_CTL_VCS2_ENABLED;

	if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
		flags |= GUC_CTL_KERNEL_SUBMISSIONS;
	else
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

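/*
 * Build the GUC_CTL_CTXINFO parameter dword: GGTT page offset and size (in
 * units of 16 descriptors) of the stage descriptor pool. Only meaningful
 * when GuC submission is in use.
 */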
static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

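/*
 * Build the GUC_CTL_LOG_PARAMS parameter dword: GGTT page offset of the log
 * buffer plus the sizes of its crash, DPC and ISR sections, encoded in
 * either 4K or 1M units depending on the configured sizes.
 */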
static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
void intel_guc_init_params(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(params, 0, sizeof(params));

	/*
	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. The quantum in ARAT units is calculated as:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10,
	 * which is the 100000000 programmed into GUC_CTL_ARAT_LOW below.
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_LOG_PARAMS]  = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);

	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
		*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(&dev_priv->uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		I915_WRITE(guc_send_reg(guc, i), action[i]);

	POSTING_READ(guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(dev_priv,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(&dev_priv->uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

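/*
 * MMIO based GuC-to-host event handler: sample and acknowledge the message
 * bits in SOFT_SCRATCH(15), then hand them to
 * intel_guc_to_host_process_recv_msg().
 */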
void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 msg, val;

	/*
	 * Sample the log buffer flush related bits & clear them out
	 * immediately from the message identity register to minimize the
	 * probability of losing a flush interrupt when there are back
	 * to back flush interrupts.
	 * There can be a new flush interrupt, for a different log buffer
	 * type (like for ISR), whilst the Host is handling one (for DPC).
	 * Since the same bit is used in the message register for ISR & DPC,
	 * it could happen that GuC sets the bit for the 2nd interrupt but
	 * the Host clears out the bit while handling the 1st interrupt.
	 */
	disable_rpm_wakeref_asserts(dev_priv);
	spin_lock(&guc->irq_lock);
	val = I915_READ(SOFT_SCRATCH(15));
	msg = val & guc->msg_enabled_mask;
	I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
	spin_unlock(&guc->irq_lock);
	enable_rpm_wakeref_asserts(dev_priv);

	intel_guc_to_host_process_recv_msg(guc, msg);
}

void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
{
	/* Make sure to handle only enabled messages */
	msg &= guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);
}

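/*
 * Send the SAMPLE_FORCEWAKE action: render/media forcewake is only
 * advertised to the GuC when RC6 is available and the coarse power gating
 * workaround does not apply.
 */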
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
 * then return, so waiting on the H2G is not enough to guarantee GuC is done.
 * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
 * scratch register 14, so we can poll on that. Note that GuC does not ensure
 * that the value in the register is different from
 * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
 * take care of that ourselves as well.
 */
static int guc_sleep_state_action(struct intel_guc *guc,
				  const u32 *action, u32 len)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;
	u32 status;

	I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, len);
	if (ret)
		return ret;

	ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	u32 data[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
		intel_guc_ggtt_offset(guc, guc->shared_data)
	};

	return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	u32 data[7];

	GEM_BUG_ON(!guc->execbuf_client);

	data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
	data[1] = engine->guc_id;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 data[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
		intel_guc_ggtt_offset(guc, guc->shared_data)
	};

	return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}

/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so
 * we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return:	A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_reserved_gtt_size() - size of GGTT shadowed by the GuC WOPCM
 * @guc:	intel_guc structure
 *
 * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using
 * GuC we can't have any objects pinned in that region. This function returns
 * the size of the shadowed region.
 *
 * Returns:
 * 0 if GuC is not present or not in use.
 * Otherwise, the GuC WOPCM size.
 */
u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
{
	return guc_to_i915(guc)->wopcm.guc.size;
}