/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
#include "i915_reset.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
#include "intel_workarounds.h"

static struct drm_driver driver;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915_modparams.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915_modparams.inject_load_failure, func, line);
		i915_modparams.inject_load_failure = 0;
		return true;
	}

	return false;
}

bool i915_error_injected(void)
{
	return i915_load_fail_count && !i915_modparams.inject_load_failure;
}

#endif
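
/*
 * Usage note (added commentary): loading the module with
 * i915.inject_load_failure=N makes the N-th checkpoint reached during
 * probe report a failure, so each error-unwind path of the load
 * sequence can be exercised in turn, e.g.:
 *
 *	modprobe i915 inject_load_failure=3
 */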

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_error)
		dev_printk(level, kdev, "%pV", &vaf);
	else
		dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
			   __builtin_return_address(0), &vaf);

	va_end(args);

	if (is_error && !shown_bug_once) {
		/*
		 * Ask the user to file a bug report for the error, except
		 * if they may have caused the bug by fiddling with unsafe
		 * module parameters.
		 */
		if (!test_taint(TAINT_USER))
			dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}
}

/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
{
	switch (id) {
	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 5));
		return PCH_IBX;
	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		return PCH_CPT;
	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		/* PantherPoint is CPT compatible */
		return PCH_CPT;
	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
			!IS_COFFEELAKE(dev_priv));
		return PCH_KBP;
	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
		WARN_ON(!IS_ICELAKE(dev_priv));
		return PCH_ICP;
	default:
		return PCH_NONE;
	}
}
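
/*
 * Example (added commentary): on a Kaby Lake desktop the PCH exposes an
 * ISA bridge whose PCI device id, masked with INTEL_PCH_DEVICE_ID_MASK,
 * yields INTEL_PCH_KBP_DEVICE_ID_TYPE, so the switch above resolves it
 * to PCH_KBP.
 */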
static bool intel_is_virt_pch(unsigned short id,
			      unsigned short svendor, unsigned short sdevice)
{
	return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
		id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
		(id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
		 svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
		 sdevice == PCI_SUBDEVICE_ID_QEMU));
}

static unsigned short
intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
{
	unsigned short id = 0;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_ICELAKE(dev_priv))
		id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
	else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
	else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
	else if (IS_GEN(dev_priv, 5))
		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;

	if (id)
		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
	else
		DRM_DEBUG_KMS("Assuming no PCH\n");

	return id;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough easy for the VMM, which then
	 * only needs to expose an ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there may be an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		unsigned short id;
		enum intel_pch pch_type;

		if (pch->vendor != PCI_VENDOR_ID_INTEL)
			continue;

		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

		pch_type = intel_pch_type(dev_priv, id);
		if (pch_type != PCH_NONE) {
			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
					     pch->subsystem_device)) {
			id = intel_virt_detect_pch(dev_priv);
			pch_type = intel_pch_type(dev_priv, id);

			/* Sanity check virtual PCH id */
			if (WARN_ON(id && pch_type == PCH_NONE))
				id = 0;

			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		}
	}

	/*
	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
	 * display.
	 */
	if (pch && !HAS_DISPLAY(dev_priv)) {
		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
		dev_priv->pch_type = PCH_NOP;
		dev_priv->pch_id = 0;
	}

	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

static int i915_getparam_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS0];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS0];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS0];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS1];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915_modparams.enable_hangcheck &&
			intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = 0;
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		value = intel_huc_check_status(&dev_priv->huc);
		if (value < 0)
			return value;
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = dev_priv->caps.scheduler;
		break;

	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
	case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_HAS_CONTEXT_ISOLATION:
		value = intel_engines_has_context_isolation(dev_priv);
		break;
	case I915_PARAM_SLICE_MASK:
		value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
		value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
		break;
	case I915_PARAM_MMAP_GTT_COHERENT:
		value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}
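
/*
 * Illustrative userspace usage (added commentary, sketch only):
 * parameters are queried through the DRM_IOCTL_I915_GETPARAM ioctl on
 * an open DRM device node, e.g.:
 *
 *	int val;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_BSD2,
 *				   .value = &val };
 *	int err = ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * Unknown parameters fail with EINVAL, as implemented above.
 */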

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
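
/*
 * Note (added commentary): MCHBAR is the memory controller hub's
 * register window; the resource is claimed here once and released again
 * in intel_teardown_mchbar().
 */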

/* Setup MCHBAR if possible; remember whether we need to disable it again */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (HAS_DISPLAY(dev_priv)) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out;
	}

	intel_bios_init(dev_priv);

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_modeset;

	intel_overlay_setup(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	intel_init_ipc(dev_priv);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev_priv);
cleanup_modeset:
	intel_modeset_cleanup(dev);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini_hw(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}
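
/*
 * Note (added commentary): the cleanup labels above unwind in strict
 * reverse order of initialization; any new init step added to
 * i915_load_modeset_init() needs a matching label at the right point.
 */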

static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->gmadr.start;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
>
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	ret = i915_gem_init_early(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_wopcm_init_early(&dev_priv->wopcm);
	intel_uc_init_early(dev_priv);
	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_uc;
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_uc:
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev_priv);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = i915_mmio_setup(dev_priv);
	if (ret < 0)
		goto err_bridge;

	intel_uncore_init(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune(dev_priv);

	intel_uc_init_mmio(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

static const char *intel_dram_type_str(enum intel_dram_type type)
{
	static const char * const str[] = {
		DRAM_TYPE_STR(UNKNOWN),
		DRAM_TYPE_STR(DDR3),
		DRAM_TYPE_STR(DDR4),
		DRAM_TYPE_STR(LPDDR3),
		DRAM_TYPE_STR(LPDDR4),
	};

	if (type >= ARRAY_SIZE(str))
		type = INTEL_DRAM_UNKNOWN;

	return str[type];
}

#undef DRAM_TYPE_STR

static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	return dimm->ranks * 64 / (dimm->width ?: 1);
}
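
/*
 * Worked example (added commentary): a dual-rank x8 DIMM has
 * 2 * 64 / 8 = 16 DRAM devices, since each rank spans a 64-bit bus
 * built from 8-bit-wide parts.
 */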

/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
	return val & SKL_DRAM_SIZE_MASK;
}

static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
}

/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
	return (val & CNL_DRAM_SIZE_MASK) / 2;
}

static int cnl_get_dimm_width(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	switch (val & CNL_DRAM_WIDTH_MASK) {
	case CNL_DRAM_WIDTH_X8:
	case CNL_DRAM_WIDTH_X16:
	case CNL_DRAM_WIDTH_X32:
		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int cnl_get_dimm_ranks(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;

	return val + 1;
}

static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total GB to Gb per DRAM device */
	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}
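
/*
 * Worked example (added commentary): a 16 GB single-rank x8 DIMM has
 * 64 / 8 = 8 devices and 8 * 16 / 8 = 16 Gb per device, so it is built
 * from 16Gb parts and the check above reports true.
 */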

static void
skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
		       struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	if (INTEL_GEN(dev_priv) >= 10) {
		dimm->size = cnl_get_dimm_size(val);
		dimm->width = cnl_get_dimm_width(val);
		dimm->ranks = cnl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}

	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		      yesno(skl_is_16gb_dimm(dimm)));
}

static int
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		DRM_DEBUG_KMS("CH%u not populated\n", channel);
		return -EINVAL;
	}

	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm =
		skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);

	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
		      channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}

static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}

static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		DRM_INFO("Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If any channel is single-rank, the worst-case output will be
	 * the same as for single-rank memory, so treat the whole system
	 * as single-rank.
	 */
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
	else
		dram_info->ranks = max(ch0.ranks, ch1.ranks);

	if (dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
		      yesno(dram_info->symmetric_memory));
	return 0;
}

static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	dram_info->type = skl_get_dram_type(dev_priv);
	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return ret;

	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_info->bandwidth_kbps = dram_info->num_channels *
							mem_freq_khz * 8;

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int bxt_get_dimm_width(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;

	return 8 << val;
}

static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
			      u32 val)
{
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * GB to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;

static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents 4-byte channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != type);

		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
			      i - BXT_D_CR_DRP0_DUNIT_START,
			      dimm.size, dimm.width, dimm.ranks,
			      intel_dram_type_str(type));

		/*
		 * If any channel is single-rank, the worst-case output
		 * will be the same as for single-rank memory, so treat
		 * the whole system as single-rank.
		 */
		if (dram_info->ranks == 0)
			dram_info->ranks = dimm.ranks;
		else if (dimm.ranks == 1)
			dram_info->ranks = 1;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
	    dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory information\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	int ret;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

	if (INTEL_GEN(dev_priv) < 9)
		return;

	if (IS_GEN9_LP(dev_priv))
		ret = bxt_get_dram_info(dev_priv);
	else
		ret = skl_get_dram_info(dev_priv);
	if (ret)
		return;

	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
		      dram_info->bandwidth_kbps,
		      dram_info->num_channels);

	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}


	intel_sanitize_options(dev_priv);

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	/*
	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over.
	 */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto err_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto err_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto err_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(dev_priv, 2)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_gt_init_workarounds(dev_priv);
	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_get_dram_info(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_register(dev_priv);
	i915_pmu_register(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN(dev_priv, 5))
		intel_gpu_ips_init(dev_priv);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(dev_priv);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);
	intel_power_domains_disable(dev_priv);

	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_unregister(dev_priv);
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   INTEL_GEN(dev_priv));

		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
1796
		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
1797 1798 1799 1800 1801 1802
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
1803 1804
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
1805 1806
}

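/*
 * Allocate the drm_i915_private with its embedded drm_device, link it to
 * the pci_dev and seed the write-once device info from the matched PCI ID
 * table entry.
 */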
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;
	int err;

	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
	if (!i915)
		return ERR_PTR(-ENOMEM);

	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
	if (err) {
		kfree(i915);
		return ERR_PTR(err);
	}

	i915->drm.pdev = pdev;
	i915->drm.dev_private = i915;
	pci_set_drvdata(pdev, &i915->drm);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     BITS_PER_TYPE(device_info->platform_mask));
	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}

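/* Counterpart to i915_driver_create(): release the drm_device and free. */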
static void i915_driver_destroy(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	drm_dev_fini(&i915->drm);
	kfree(i915);

	/* And make sure we never chase our dangling pointer from pci_dev */
	pci_set_drvdata(pdev, NULL);
}

/**
 * i915_driver_load - set up the chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = i915_driver_create(pdev, ent);
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_init_early(dev_priv);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;
	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_hw;

	i915_driver_register(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	i915_welcome_messages(dev_priv);

	return 0;

out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	i915_driver_destroy(dev_priv);
	return ret;
}

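/**
 * i915_driver_unload - unwind i915_driver_load()
 * @dev: DRM device
 *
 * Unregister the device from userspace, idle the hardware and tear down
 * the modeset, GEM and MMIO state set up during load.
 */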
void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	disable_rpm_wakeref_asserts(dev_priv);

	i915_driver_unregister(dev_priv);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	drm_atomic_helper_shutdown(dev);

	intel_gvt_cleanup(dev_priv);

	intel_modeset_cleanup(dev);

	intel_bios_cleanup(dev_priv);

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_reset_error_state(dev_priv);

	i915_gem_fini(dev_priv);

	intel_power_domains_fini_hw(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);
	intel_runtime_pm_cleanup(dev_priv);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_driver_cleanup_early(dev_priv);
	i915_driver_destroy(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	kfree(file_priv);
}

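/* Give each encoder a chance to quiesce via its optional ->suspend() hook. */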
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

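/*
 * True when the target ACPI sleep state is shallower than S3, i.e. we are
 * suspending to idle rather than to memory.
 */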
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int err;

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after this point,
	 * the GPU is not woken again.
	 */
	err = i915_gem_suspend(i915);
	if (err)
		dev_err(&i915->drm.pdev->dev,
			"GEM idle failed, suspend/resume might fail\n");

	return err;
}

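/* First phase of system suspend: quiesce the display and save device state. */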
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, so make sure they
	 * work properly. */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_gem_suspend_gtt_mappings(dev_priv);

	i915_save_state(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

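/*
 * Second phase of system suspend: drop to the lowest device power state
 * allowed for the chosen suspend mode and, where safe, put the PCI device
 * into D3hot.
 */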
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(dev_priv);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	ret = 0;
	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre-GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(dev_priv);
	if (!dev_priv->uncore.user_forcewake.count)
		intel_runtime_pm_cleanup(dev_priv);

	return ret;
}

static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

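/*
 * Second phase of system resume: restore the saved state and bring the
 * display back up.
 */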
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);
	intel_sanitize_gt_powersave(dev_priv);

	i915_gem_sanitize(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not,
	 * the GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev);
	intel_init_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code, we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

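/*
 * First phase of system resume: re-power the PCI device and undo the
 * power state changes done in i915_drm_suspend_late().
 */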
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_resume_early(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
		gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev_priv);

	intel_power_domains_resume(dev_priv);

	intel_engines_sanitize(dev_priv, true);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (!dev) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(dev);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (!dev) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(dev);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(dev);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(dev);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
	int ret;

	if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(dev);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
	int ret;

	if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(dev, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->pcbr			= I915_READ(VLV_PCBR);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bits; they are not
	 * restored here, as they are used by the caller to control the s0ix
	 * suspend/resume sequence.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_PCBR,			s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}

static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
				  u32 mask, u32 val)
{
	i915_reg_t reg = VLV_GTLC_PW_STATUS;
	u32 reg_value;
	int ret;

	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	return ret;
}

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}

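/*
 * Toggle the GT allow-wake request and wait for the matching ACK in the
 * power well status register.
 */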
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 mask;
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	mask = VLV_GTLC_ALLOWWAKEACK;
	val = allow ? mask : 0;

	err = vlv_wait_for_pw_status(dev_priv, mask, val);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");

	return err;
}

static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				  bool wait_for_on)
{
	u32 mask;
	u32 val;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 *
	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
	 * reset and we are trying to force the machine to sleep.
	 */
	if (vlv_wait_for_pw_status(dev_priv, mask, val))
		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
				 onoff(wait_for_on));
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well power-on flags as debug only,
	 * so don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}

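/*
 * Runtime suspend: entered by the PM core once the device has been idle
 * long enough; drops the GPU into its lowest power state.
 */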
static int intel_runtime_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_uc_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(dev_priv);

	ret = 0;
	if (INTEL_GEN(dev_priv) >= 11) {
		icl_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(dev_priv);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_uc_resume(dev_priv);

		i915_gem_init_swizzling(dev_priv);
		i915_gem_restore_fences(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	enable_rpm_wakeref_asserts(dev_priv);
	intel_runtime_pm_cleanup(dev_priv);

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->runtime_pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

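/* Runtime resume: undo intel_runtime_suspend() when the device is needed again. */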
static int intel_runtime_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	dev_priv->runtime_pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	if (INTEL_GEN(dev_priv) >= 11) {
		bxt_disable_dc9(dev_priv);
		icl_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload) {
			if (dev_priv->csr.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(dev_priv);
			else if (dev_priv->csr.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(dev_priv);
		}
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}

	intel_uncore_runtime_resume(dev_priv);

	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_uc_resume(dev_priv);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev_priv);
	i915_gem_restore_fences(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif