/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_csr.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_ioc32.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "vlv_suspend.h"

static struct drm_driver driver;

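/*
 * Look up the PCI host bridge (device 0, function 0); the MCHBAR helpers
 * below poke its config space.
 */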
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		drm_err(&dev_priv->drm, "bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Set up MCHBAR if possible; mchbar_need_disable records whether teardown must disable it again */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0);
	pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);

	if (pre) {
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

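/*
 * Reset all engines to scrub whatever GPU state was left behind before we
 * start using the device, unless the reset would also clobber the display.
 */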
static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(&i915->gt, ALL_ENGINES);
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);

	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_gt_init_early(&dev_priv->gt, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			       i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	i915_params_free(&dev_priv->params);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	ret = intel_gt_init_mmio(&dev_priv->gt);
	if (ret)
		goto err_uncore;

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks.  The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs.  Address them as well.
 *
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(i915, 2))
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	intel_gt_init_workarounds(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_pcode_init(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		intel_display_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	intel_gt_driver_register(&dev_priv->gt);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

	intel_gt_driver_unregister(&dev_priv->gt);
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   INTEL_GEN(dev_priv));

		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
		intel_gt_info_print(&dev_priv->gt.info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

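/*
 * Allocate the drm_i915_private (with the embedded drm_device) against the
 * PCI device and seed its write-once device info from the matched PCI ID
 * table entry.
 */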
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}

/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *i915;
	int ret;

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915->params.nuclear_pageflip && match_info->gen < 5)
		i915->drm.driver_features &= ~DRIVER_ATOMIC;

	/*
	 * Check if we support fake LMEM -- for now we only unleash this for
	 * the live selftests(test-and-exit).
	 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
		if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
		    i915->params.fake_lmem_start) {
			mkwrite_device_info(i915)->memory_regions =
				REGION_SMEM | REGION_LMEM | REGION_STOLEN;
			GEM_BUG_ON(!HAS_LMEM(i915));
		}
	}
#endif

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_modeset_init_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_modeset_init_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	ret = intel_modeset_init(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_modeset_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	drm_atomic_helper_shutdown(&i915->drm);

	intel_gvt_driver_remove(i915);

	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_modeset_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_modeset_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	if (!dev_priv->do_release)
		return;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

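/* Per-client open: set up the GEM file-private state for a new DRM fd. */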
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

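/* Call the optional ->suspend() hook of every encoder under the modeset locks. */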
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(dev);
}

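/*
 * Quiesce the device for system shutdown/reboot: park the GPU, tear down the
 * display state and shut down the encoders.
 */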
void i915_driver_shutdown(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	drm_kms_helper_poll_disable(&i915->drm);

	drm_atomic_helper_shutdown(&i915->drm);

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);
}

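/* True when the ACPI target sleep state is shallower than S3 (i.e. s2idle/S0ix). */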
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, so make sure they
	 * work properly. */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_ggtt_suspend(&dev_priv->ggtt);

	i915_save_display(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

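/*
 * Pick the power-domains suspend mode for this transition: hibernation,
 * suspend-to-idle or suspend-to-mem.
 */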
static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(&dev_priv->ggtt);

	intel_csr_ucode_resume(dev_priv);

	i915_restore_display(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		drm_err(&dev_priv->drm,
			"Resume prepare failed: %d, continuing anyway\n", ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	intel_gt_check_and_clear_faults(&dev_priv->gt);

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

int i915_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(&i915->drm);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

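/*
 * Runtime suspend: park GEM and the GT, disable interrupts, quiesce the
 * uncore and power down the display.
 */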
static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_gt_runtime_suspend(&dev_priv->gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_display_power_suspend(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(&dev_priv->gt);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(&dev_priv->gt);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
};

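/* The legacy GEM pin/unpin ioctls are no longer supported and are always rejected. */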
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};