/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *dev = dev_priv->drm.dev;
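	/* Log levels are KERN_SOH plus an ASCII digit (KERN_ERR is "3"),
	 * so comparing the digit at level[1] is a cheap severity test. */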
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(dev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915.inject_load_failure &&
	       i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				     \
	__i915_printk(dev_priv,						     \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)
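
/*
 * Illustrative sketch, not a real call site: booting with the module
 * parameter i915.inject_load_failure=N makes the Nth checkpoint report
 * a failure, so an init phase (the function name below is made up)
 * bails out early:
 *
 *	static int some_init_phase(struct drm_i915_private *dev_priv)
 *	{
 *		if (i915_inject_load_failure())
 *			return -ENODEV;
 *		...
 *	}
 *
 * i915_load_error() then demotes the message to KERN_DEBUG, since the
 * failure was requested deliberately (see i915_error_injected()).
 */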


static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

static void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor ==
					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
				    pch->subsystem_device ==
					    PCI_SUBDEVICE_ID_QEMU)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_engine_initialized(&dev_priv->engine[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_engine_initialized(&dev_priv->engine[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_engine_initialized(&dev_priv->engine[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_engine_initialized(&dev_priv->engine[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915.semaphores;
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		value = 1;
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev)->min_eu_in_pool;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}
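
/*
 * Illustrative sketch, not driver code: userspace reaches this ioctl
 * through libdrm. Assuming an already-open card fd, a chipset-id query
 * looks roughly like this (error handling elided):
 *
 *	int devid = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *				   .value = &devid };
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * On success, the put_user() above has copied the value into *gp.value.
 */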

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; remember whether we need to disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static void i915_gem_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Neither the BIOS, ourselves, nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload. Afterwards we then clean up the
	 * GEM state tracking, flushing off the requests and leaving the
	 * system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	if (HAS_HW_CONTEXTS(dev)) {
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

	mutex_lock(&dev->struct_mutex);
	i915_gem_reset(dev);
	i915_gem_cleanup_engines(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);

	WARN_ON(!list_empty(&to_i915(dev)->context_list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	i915_gem_fini(dev);
cleanup_irq:
	intel_guc_fini(dev);
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}
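
/*
 * Illustrative sketch, an assumption about the retirement code living in
 * i915_gem.c (the exact names there may differ): retire work is queued on
 * the ordered dev_priv->wq so items serialize rather than run concurrently:
 *
 *	queue_delayed_work(dev_priv->wq, &dev_priv->gt.retire_work,
 *			   round_jiffies_up_relative(HZ));
 */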

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(dev_priv);
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = intel_gvt_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(&dev_priv->drm);

	intel_pm_setup(&dev_priv->drm);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	i915_gem_load_init(&dev_priv->drm);

	intel_display_crc_init(&dev_priv->drm);

	intel_device_info_dump(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev_priv))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	return 0;

err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_load_cleanup(&dev_priv->drm);
	i915_workqueues_cleanup(dev_priv);
}

static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want mapped with ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}

static void i915_mmio_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_teardown_mchbar(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev))
		return -EIO;

	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;

	intel_uncore_init(dev_priv);

	return 0;

put_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities.  We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t aperture_size;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	intel_sanitize_options(dev_priv);

	ret = i915_ggtt_init_hw(dev);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev)) {
		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}


	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	aperture_size = ggtt->mappable_end;

	ggtt->mappable =
		io_mapping_create_wc(ggtt->mappable_base,
				     aperture_size);
	if (!ggtt->mappable) {
		ret = -EIO;
		goto out_ggtt;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
					      aperture_size);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
		if (pci_enable_msi(dev->pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI\n");
	}

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	arch_phys_wc_del(ggtt->mtrr);
	io_mapping_free(ggtt->mappable);
	i915_ggtt_cleanup_hw(dev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	i915_audio_component_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_audio_component_cleanup(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_teardown_sysfs(&dev_priv->drm);
	i915_debugfs_unregister(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "[" DRM_NAME ":%s] allocation failed\n", __func__);
		kfree(dev_priv);
		return ret;
	}

	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_free_priv;

	pci_set_drvdata(pdev, &dev_priv->drm);

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_vblank:
	drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_free_priv:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_unref(&dev_priv->drm);
	return ret;
}

void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_fbdev_fini(dev);

	if (i915_gem_suspend(dev))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	/* Flush any outstanding unpin_work. */
	drain_workqueue(dev_priv->wq);

	intel_guc_fini(dev);
	i915_gem_fini(dev);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_cleanup_early(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_guc_suspend(dev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev_priv, opregion_target_state);

	intel_uncore_forcewake_reset(dev_priv, false);
	intel_opregion_unregister(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	intel_csr_ucode_suspend(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}

static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(drm_dev);
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	fw_csr = !IS_BROXTON(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = 0;
	if (IS_BROXTON(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	ret = i915_ggtt_enable_hw(dev);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	i915_gem_resume(dev);

	i915_restore_state(dev);
	intel_opregion_setup(dev_priv);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 * */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_register(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev_priv, true);

	if (IS_BROXTON(dev_priv)) {
		if (!dev_priv->suspended_to_idle)
			gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev_priv);

	if (IS_BROXTON(dev_priv) ||
	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

	enable_rpm_wakeref_asserts(dev_priv);

out:
	dev_priv->suspended_to_idle = false;

	return ret;
}

int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

/**
 * i915_reset - reset chip after a hang
 * @dev_priv: device private
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	unsigned reset_counter;
	int ret;

	mutex_lock(&dev->struct_mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	atomic_andnot(I915_WEDGED, &error->reset_counter);

	/* Clear the reset-in-progress flag and increment the reset epoch. */
	reset_counter = atomic_inc_return(&error->reset_counter);
	if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
		ret = -EIO;
		goto error;
	}

	pr_notice("drm/i915: Resetting chip after gpu hang\n");

	i915_gem_reset(dev);

	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
	if (ret) {
		if (ret != -ENODEV)
			DRM_ERROR("Failed to reset chip: %i\n", ret);
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(dev);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		goto error;
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_GEN(dev_priv) > 5)
		intel_enable_gt_powersave(dev_priv);

	return 0;

error:
	atomic_or(I915_WEDGED, &error->reset_counter);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
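
/*
 * Illustrative sketch (editorial example, not driver code; the real bit
 * definitions live in i915_drv.h): reset_counter above packs state and an
 * epoch into one atomic. Assuming the conventional encoding (a low
 * "reset in progress" flag cleared by the atomic_inc_return() above, the
 * sticky I915_WEDGED bit for unrecoverable failures, and the remaining
 * bits counting completed resets), a waiter can detect that a reset ran
 * while it slept by comparing epochs:
 */
#if 0	/* example only, never compiled */
static bool example_reset_ran_since(struct i915_gpu_error *error,
				    unsigned int snapshot)
{
	unsigned int now = atomic_read(&error->reset_counter);

	/* Ignore the wedged bit; any epoch change means a reset happened. */
	return (now & ~I915_WEDGED) != (snapshot & ~I915_WEDGED);
}
#endif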

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *dev)
{
	return i915_pm_suspend(dev);
}

static int i915_pm_freeze_late(struct device *dev)
{
	int ret;

	ret = i915_pm_suspend_late(dev);
	if (ret)
		return ret;

	ret = i915_gem_freeze_late(dev_to_i915(dev));
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *dev)
{
	return i915_pm_resume_early(dev);
}

static int i915_pm_thaw(struct device *dev)
{
	return i915_pm_resume(dev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *dev)
{
	return i915_pm_resume_early(dev);
}

static int i915_pm_restore(struct device *dev)
{
	return i915_pm_resume(dev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully set up by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that, based on the above 3 criteria, can be
 * safely ignored, we save/restore all others, practically treating the HW
 * context as a black-box for the driver. Further investigation is needed to
 * reduce the saved/restored registers even further, by following the same
 * 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->pcbr			= I915_READ(VLV_PCBR);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bits; they are not
	 * restored here, as they are used by the caller to control the s0ix
	 * suspend/resume sequence.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_PCBR,			s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}
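
/*
 * Illustrative sketch (editorial example, not driver code) of the
 * masked-merge pattern used for VLV_GTLC_WAKE_CTRL and
 * VLV_GTLC_SURVIVABILITY_REG above: restore every bit from the saved value
 * except the control bits owned by the caller, which keep their live value:
 */
#if 0	/* example only, never compiled */
static u32 example_merge_saved(u32 live, u32 saved, u32 caller_owned_mask)
{
	return (live & caller_owned_mask) | (saved & ~caller_owned_mask);
}
#endif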

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}
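
/*
 * Illustrative sketch (editorial example, not driver code): callers are
 * expected to bracket register accesses that need the GFX clock with a
 * force-on/force-off pair, as vlv_suspend_complete() below does around the
 * Gunit state save. Roughly:
 */
#if 0	/* example only, never compiled */
static int example_with_forced_gfx_clock(struct drm_i915_private *dev_priv)
{
	int err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		return err;

	/* ... access registers that require the GFX clock to be running ... */

	return vlv_force_gfx_clock(dev_priv, false);
}
#endif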

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_PW_STATUS,
				      VLV_GTLC_ALLOWWAKEACK,
				      allow,
				      1);
	if (err)
		DRM_ERROR("timeout %s GT waking\n",
			  allow ? "enabling" : "disabling");

	return err;
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
	if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      onoff(wait_for_on),
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_PW_STATUS, mask, val,
				      3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  onoff(wait_for_on));

	return err;
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT power well on/off flags as debug
	 * only, so don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = &dev_priv->drm;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	intel_guc_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = 0;
	if (IS_BROXTON(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	intel_uncore_forcewake_reset(dev_priv, false);

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display power well,
	 * so hpd polling is handled from there; for everyone else enable
	 * polling here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
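
/*
 * Illustrative sketch (editorial example, not driver code) of the
 * trylock-and-reschedule pattern used in intel_runtime_suspend() above: a
 * runtime-PM callback that must take a lock held by threads which may
 * themselves wait on runtime PM avoids the deadlock by failing with
 * -EAGAIN and bumping the autosuspend timestamp so the PM core retries:
 */
#if 0	/* example only, never compiled */
static int example_runtime_suspend(struct device *device, struct mutex *lock)
{
	if (!mutex_trylock(lock)) {
		/* Bump the expiration timestamp so the suspend is retried. */
		pm_runtime_mark_last_busy(device);
		return -EAGAIN;
	}

	/* ... the actual suspend work goes here ... */

	mutex_unlock(lock);
	return 0;
}
#endif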

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev_priv)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);
	intel_enable_gt_powersave(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
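
/*
 * Illustrative sketch (editorial example; the actual PCI driver hookup
 * lives elsewhere in the driver): a dev_pm_ops table like the one above is
 * wired to the device through the pci_driver's embedded device_driver:
 */
#if 0	/* example only, never compiled */
static struct pci_driver example_pci_driver = {
	.name = DRIVER_NAME,
	/* .id_table, .probe, .remove, ... */
	.driver.pm = &i915_pm_ops,
};
#endif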

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};
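
/*
 * Illustrative sketch (editorial example, userspace side): each entry above
 * is dispatched through drm_ioctl(). For instance, userspace can query
 * I915_GETPARAM via the matching ioctl number from uapi's i915_drm.h:
 */
#if 0	/* example only, never compiled */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_get_chipset_id(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return value;	/* the GPU's PCI device id */
}
#endif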

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};