/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
#include "i915_reset.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
#include "intel_workarounds.h"

static struct drm_driver driver;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_load_fail_count;

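/*
 * Fail exactly one load checkpoint: the i915.inject_load_failure modparam
 * selects which call to i915_inject_load_failure() reports a failure, and
 * the parameter is then cleared so the error is injected only once.
 */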
bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915_modparams.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915_modparams.inject_load_failure, func, line);
		i915_modparams.inject_load_failure = 0;
		return true;
	}

	return false;
}

bool i915_error_injected(void)
{
	return i915_load_fail_count && !i915_modparams.inject_load_failure;
}

#endif

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

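/*
 * KERN_<level> strings are SOH followed by an ASCII digit that grows as
 * severity drops, so comparing level[1] against KERN_ERR[1]/KERN_DEBUG[1]
 * is enough to classify the message.
 */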
void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_error)
		dev_printk(level, kdev, "%pV", &vaf);
	else
		dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
			   __builtin_return_address(0), &vaf);

	va_end(args);

	if (is_error && !shown_bug_once) {
		/*
		 * Ask the user to file a bug report for the error, except
		 * if they may have caused the bug by fiddling with unsafe
		 * module parameters.
		 */
		if (!test_taint(TAINT_USER))
			dev_notice(kdev, "%s", FDO_BUG_MSG);
126 127 128 129
		shown_bug_once = true;
	}
}

/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
{
	switch (id) {
	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 5));
		return PCH_IBX;
	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		return PCH_CPT;
	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		/* PantherPoint is CPT compatible */
		return PCH_CPT;
	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
			!IS_COFFEELAKE(dev_priv));
		return PCH_KBP;
	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_CMP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
		WARN_ON(!IS_COFFEELAKE(dev_priv));
		/* CometPoint is CNP Compatible */
		return PCH_CNP;
	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
		WARN_ON(!IS_ICELAKE(dev_priv));
		return PCH_ICP;
	default:
		return PCH_NONE;
	}
}

static bool intel_is_virt_pch(unsigned short id,
			      unsigned short svendor, unsigned short sdevice)
{
	return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
		id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
		(id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
		 svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
		 sdevice == PCI_SUBDEVICE_ID_QEMU));
}

static unsigned short
intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
{
	unsigned short id = 0;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_ICELAKE(dev_priv))
		id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
	else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
	else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
	else if (IS_GEN(dev_priv, 5))
		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;

	if (id)
		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
	else
		DRM_DEBUG_KMS("Assuming no PCH\n");

	return id;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough easier for the VMM, which then
	 * only needs to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN) there is an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		unsigned short id;
		enum intel_pch pch_type;

		if (pch->vendor != PCI_VENDOR_ID_INTEL)
			continue;

		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

		pch_type = intel_pch_type(dev_priv, id);
		if (pch_type != PCH_NONE) {
			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
					 pch->subsystem_device)) {
			id = intel_virt_detect_pch(dev_priv);
			pch_type = intel_pch_type(dev_priv, id);

			/* Sanity check virtual PCH id */
			if (WARN_ON(id && pch_type == PCH_NONE))
				id = 0;

			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		}
	}

	/*
	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
	 * display.
	 */
	if (pch && !HAS_DISPLAY(dev_priv)) {
		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
		dev_priv->pch_type = PCH_NOP;
		dev_priv->pch_id = 0;
	}

	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

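/*
 * GETPARAM hands a single int back to userspace per query: obsolete
 * UMS/DRI-era parameters are rejected with -ENODEV, and anything unknown
 * with -EINVAL.
 */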
static int i915_getparam_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS0];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS0];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS0];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS1];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = INTEL_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915_modparams.enable_hangcheck &&
			intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = 0;
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		value = intel_huc_check_status(&dev_priv->huc);
		if (value < 0)
			return value;
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = dev_priv->caps.scheduler;
		break;

	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
	case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_HAS_CONTEXT_ISOLATION:
		value = intel_engines_has_context_isolation(dev_priv);
		break;
	case I915_PARAM_SLICE_MASK:
		value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
		value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
		break;
	case I915_PARAM_MMAP_GTT_COHERENT:
		value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

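	/* MCHBAR is a 64-bit BAR on gen4+, so the high dword lives at reg + 4. */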
	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Set up MCHBAR if possible, noting whether we must disable it again on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (HAS_DISPLAY(dev_priv)) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out;
	}

	intel_bios_init(dev_priv);

	/* If we have more than one VGA card, we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_modeset;

	intel_overlay_setup(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	intel_init_ipc(dev_priv);

	return 0;

cleanup_gem:
	i915_gem_suspend(dev_priv);
	i915_gem_fini(dev_priv);
cleanup_modeset:
	intel_modeset_cleanup(dev);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini_hw(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

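/*
 * Carve the GGTT aperture out of any firmware framebuffer (e.g. efifb)
 * so the conflicting driver is removed before we take over scanout.
 */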
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->gmadr.start;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
837 838 839
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	ret = i915_gem_init_early(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_wopcm_init_early(&dev_priv->wopcm);
	intel_uc_init_early(dev_priv);
	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_uc;
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_uc:
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune(&dev_priv->uncore);

	intel_uc_init_mmio(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

static const char *intel_dram_type_str(enum intel_dram_type type)
{
	static const char * const str[] = {
		DRAM_TYPE_STR(UNKNOWN),
		DRAM_TYPE_STR(DDR3),
		DRAM_TYPE_STR(DDR4),
		DRAM_TYPE_STR(LPDDR3),
		DRAM_TYPE_STR(LPDDR4),
	};

	if (type >= ARRAY_SIZE(str))
		type = INTEL_DRAM_UNKNOWN;

	return str[type];
}

#undef DRAM_TYPE_STR

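/*
 * Each rank is 64 bits wide, so a DIMM carries ranks * 64 / width devices:
 * e.g. a dual-rank DIMM built from x8 chips has 2 * 64 / 8 = 16 devices.
 */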
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	return dimm->ranks * 64 / (dimm->width ?: 1);
}

/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
	return val & SKL_DRAM_SIZE_MASK;
}

static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

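	/* The width field encodes x8/x16/x32 as 0/1/2, i.e. width = 8 << field. */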
	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
}

/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
	return (val & CNL_DRAM_SIZE_MASK) / 2;
}

static int cnl_get_dimm_width(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	switch (val & CNL_DRAM_WIDTH_MASK) {
	case CNL_DRAM_WIDTH_X8:
	case CNL_DRAM_WIDTH_X16:
	case CNL_DRAM_WIDTH_X32:
		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int cnl_get_dimm_ranks(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;

	return val + 1;
}

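/*
 * 16Gb refers to the density of a single DRAM device: e.g. a 16 GB
 * single-rank x8 DIMM has 8 devices, giving 8 * 16 / 8 = 16 Gb each.
 */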
static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total GB to Gb per DRAM device */
	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}

static void
skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
		       struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	if (INTEL_GEN(dev_priv) >= 10) {
		dimm->size = cnl_get_dimm_size(val);
		dimm->width = cnl_get_dimm_width(val);
		dimm->ranks = cnl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}
	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		      yesno(skl_is_16gb_dimm(dimm)));
}

static int
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		DRM_DEBUG_KMS("CH%u not populated\n", channel);
		return -EINVAL;
	}

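	/*
	 * A channel presents two ranks either via one dual-rank DIMM or via
	 * a single-rank DIMM in each of its two slots.
	 */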
	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm =
		skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);
	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
		      channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}

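/*
 * Channels are symmetric when both decode identically and, within a
 * channel, DIMM S is either absent or matches DIMM L; plain memcmp() is
 * safe here since the structures start out zero-initialized.
 */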
static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}

static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		DRM_INFO("Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If any of the channel is single rank channel, worst case output
	 * will be same as if single rank memory, so consider single rank
	 * memory.
	 */
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
	else
		dram_info->ranks = max(ch0.ranks, ch1.ranks);

	if (dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
		      yesno(dram_info->symmetric_memory));
	return 0;
}

static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	dram_info->type = skl_get_dram_type(dev_priv);
	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return ret;

	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

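	/* Each channel is 8 bytes wide, hence the factor of 8 below. */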
	dram_info->bandwidth_kbps = dram_info->num_channels *
							mem_freq_khz * 8;

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int bxt_get_dimm_width(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;

	return 8 << val;
}

static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
			      u32 val)
{
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * GB to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
}

static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents 4-byte channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != type);

		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
			      i - BXT_D_CR_DRP0_DUNIT_START,
			      dimm.size, dimm.width, dimm.ranks,
			      intel_dram_type_str(type));

		/*
		 * If any of the channel is single rank channel,
		 * worst case output will be same as if single rank
		 * memory, so consider single rank memory.
		 */
		if (dram_info->ranks == 0)
			dram_info->ranks = dimm.ranks;
		else if (dimm.ranks == 1)
			dram_info->ranks = 1;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
	    dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory information\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	int ret;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

	if (INTEL_GEN(dev_priv) < 9)
		return;

	if (IS_GEN9_LP(dev_priv))
		ret = bxt_get_dram_info(dev_priv);
	else
		ret = skl_get_dram_info(dev_priv);
	if (ret)
		return;

	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
		      dram_info->bandwidth_kbps,
		      dram_info->num_channels);

	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}

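/*
 * eDRAM size in MB is banks * ways * sets as decoded from the capability
 * register: e.g. 8 banks, 16 ways and 2 sets give 8 * 16 * 2 = 256 MB.
 */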
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)];
}

static void edram_detect(struct drm_i915_private *dev_priv)
{
	u32 edram_cap = 0;

	if (!(IS_HASWELL(dev_priv) ||
	      IS_BROADWELL(dev_priv) ||
	      INTEL_GEN(dev_priv) >= 9))
		return;

	edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);

	/* NB: We can't write IDICR yet because we don't have gt funcs set up */

	if (!(edram_cap & EDRAM_ENABLED))
		return;

	/*
	 * The needed capability bits for size calculation are not there with
	 * pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		dev_priv->edram_size_mb = 128;
	else
		dev_priv->edram_size_mb =
			gen9_edram_size_mb(dev_priv, edram_cap);

	DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	edram_detect(dev_priv);

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	/*
	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over.
	 */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto err_ggtt;
	}

	ret = vga_remove_vgacon(pdev);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto err_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto err_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(dev_priv, 2)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_gt_init_workarounds(dev_priv);
	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and so the capability was defeatured. MSI
	 * interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_get_dram_info(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_register(dev_priv);
	i915_pmu_register(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
D
		i915_setup_sysfs(dev_priv);
		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
1692 1693 1694
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN(dev_priv, 5))
		intel_gpu_ips_init(dev_priv);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(dev_priv);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);
	intel_power_domains_disable(dev_priv);

	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_unregister(dev_priv);
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   INTEL_GEN(dev_priv));

		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
1777 1778
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
1779 1780
}

1781 1782 1783 1784 1785 1786 1787
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;
	int err;

	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
	if (!i915)
		return ERR_PTR(-ENOMEM);

	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
	if (err) {
		kfree(i915);
		return ERR_PTR(err);
	}

	i915->drm.pdev = pdev;
	i915->drm.dev_private = i915;
	pci_set_drvdata(pdev, &i915->drm);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     BITS_PER_TYPE(device_info->platform_mask));
	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}

static void i915_driver_destroy(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	drm_dev_fini(&i915->drm);
	kfree(i915);

	/* And make sure we never chase our dangling pointer from pci_dev */
	pci_set_drvdata(pdev, NULL);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = i915_driver_create(pdev, ent);
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_init_early(dev_priv);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_hw;

	i915_driver_register(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	i915_welcome_messages(dev_priv);

	return 0;
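
	/*
	 * Error unwind: each label below undoes the init steps that had
	 * already succeeded at the point of failure, in reverse order
	 * ("onion" teardown).
	 */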

out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	i915_driver_destroy(dev_priv);
	return ret;
}

void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	disable_rpm_wakeref_asserts(dev_priv);

	i915_driver_unregister(dev_priv);

	/* Flush any external code that may still be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(dev_priv);

	drm_atomic_helper_shutdown(dev);

	intel_gvt_cleanup(dev_priv);

	intel_modeset_cleanup(dev);

	intel_bios_cleanup(dev_priv);

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_reset_error_state(dev_priv);

	i915_gem_fini(dev_priv);

	intel_power_domains_fini_hw(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);
	intel_runtime_pm_cleanup(dev_priv);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_driver_cleanup_early(dev_priv);
	i915_driver_destroy(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	kfree(file_priv);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
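
/*
 * A rough illustration, assuming ACPI_SLEEP is enabled: with
 * suspend-to-idle (e.g. "s2idle" selected via /sys/power/mem_sleep) the
 * ACPI target state stays below ACPI_STATE_S3, so this returns true;
 * i915_drm_suspend() then notifies the opregion with PCI_D1 and
 * get_suspend_mode() picks I915_DRM_SUSPEND_IDLE instead of
 * I915_DRM_SUSPEND_MEM.
 */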

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We do a lot of poking in a lot of registers; make sure they work
	 * properly.
	 */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_gem_suspend_gtt_mappings(dev_priv);

	i915_save_state(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}
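
/*
 * Put differently (a sketch of the mapping above): hibernation ->
 * I915_DRM_SUSPEND_HIBERNATE, suspend-to-idle -> I915_DRM_SUSPEND_IDLE,
 * plain suspend-to-RAM -> I915_DRM_SUSPEND_MEM; the result is handed to
 * intel_power_domains_suspend() in i915_drm_suspend_late() below.
 */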

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	ret = 0;
	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(dev_priv);
	if (!dev_priv->uncore.user_forcewake.count)
		intel_runtime_pm_cleanup(dev_priv);

	return ret;
}

static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);
	intel_sanitize_gt_powersave(dev_priv);

	i915_gem_sanitize(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev);
	intel_init_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	i915_check_and_clear_faults(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
		gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev_priv);

	intel_power_domains_resume(dev_priv);

	intel_engines_sanitize(dev_priv, true);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (!dev) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(dev);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (!dev) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(dev);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(dev);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(dev);
}

/* freeze: before creating the hibernation image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
	int ret;

	if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(dev);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
	int ret;

	if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(dev, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that, based on the above 3 criteria, can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->pcbr			= I915_READ(VLV_PCBR);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit; they are not
	 * restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_PCBR,			s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}

static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
				  u32 mask, u32 val)
{
	i915_reg_t reg = VLV_GTLC_PW_STATUS;
	u32 reg_value;
	int ret;

	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	return ret;
}
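
/*
 * Usage sketch, based on the callers below: vlv_allow_gt_wake() waits
 * for VLV_GTLC_ALLOWWAKEACK to follow the requested ALLOWWAKEREQ value,
 * and vlv_wait_for_gt_wells() waits on the render/media well status
 * bits, both reusing this sleeping poll instead of a busy spin.
 */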

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(&dev_priv->uncore,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}
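
/*
 * Calling convention, as seen in vlv_suspend_complete() and
 * vlv_resume_prepare() below: the Gunit s0ix save/restore is bracketed
 * by vlv_force_gfx_clock(dev_priv, true) ... vlv_force_gfx_clock(dev_priv,
 * false), so the GFX clock is only forced on while those registers are
 * actually being accessed.
 */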

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 mask;
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	mask = VLV_GTLC_ALLOWWAKEACK;
	val = allow ? mask : 0;

	err = vlv_wait_for_pw_status(dev_priv, mask, val);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");

	return err;
}

static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				  bool wait_for_on)
{
	u32 mask;
	u32 val;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 *
	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
	 * reset and we are trying to force the machine to sleep.
	 */
	if (vlv_wait_for_pw_status(dev_priv, mask, val))
		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
				 onoff(wait_for_on));
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well-on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}

static int intel_runtime_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_uc_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	ret = 0;
	if (INTEL_GEN(dev_priv) >= 11) {
		icl_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_uc_resume(dev_priv);

		i915_gem_init_swizzling(dev_priv);
		i915_gem_restore_fences(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	enable_rpm_wakeref_asserts(dev_priv);
	intel_runtime_pm_cleanup(dev_priv);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->runtime_pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	dev_priv->runtime_pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	if (INTEL_GEN(dev_priv) >= 11) {
		bxt_disable_dc9(dev_priv);
		icl_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload) {
			if (dev_priv->csr.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(dev_priv);
			else if (dev_priv->csr.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(dev_priv);
		}
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_uc_resume(dev_priv);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev_priv);
	i915_gem_restore_fences(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
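
/*
 * For reference, a sketch of how this table is hooked up to the PM
 * core: the PCI glue (i915_pci.c, not shown here) points the driver's
 * pm field at it, roughly
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		...
 *		.driver.pm = &i915_pm_ops,
 *	};
 *
 * after which the PM core invokes the handlers above for system
 * suspend/resume, hibernation and runtime PM transitions.
 */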

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif