/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_csr.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_ioc32.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "vlv_suspend.h"

static const struct drm_driver driver;

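/*
 * The bridge device is the host bridge (device 0, function 0 on bus 0 of
 * the GPU's PCI domain); its config space is what the MCHBAR helpers
 * below read and write.
 */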
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		drm_err(&dev_priv->drm, "bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; remember whether we need to disable it again */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
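	/* Callers then queue work on it with, e.g., queue_work(dev_priv->wq, &work). */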
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0);
	pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);

	if (pre) {
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

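/*
 * Scrub any GPU state left behind by the BIOS or a previous kernel by
 * resetting all engines, unless the reset would also clobber the display.
 */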
static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(&i915->gt, ALL_ENGINES);
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);

	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_gt_init_early(&dev_priv->gt, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			       i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	i915_params_free(&dev_priv->params);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);
	intel_device_info_runtime_init(dev_priv);

	ret = intel_gt_init_mmio(&dev_priv->gt);
	if (ret)
		goto err_uncore;

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks.  The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs.  Address them as well.
 *
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(i915, 2))
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	intel_gt_init_workarounds(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seem to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_pcode_init(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		if (HAS_DISPLAY(dev_priv))
			intel_display_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	intel_gt_driver_register(&dev_priv->gt);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);
	drm_atomic_helper_shutdown(&dev_priv->drm);

	intel_gt_driver_unregister(&dev_priv->gt);
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   INTEL_GEN(dev_priv));

		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
		intel_gt_info_print(&dev_priv->gt.info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

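/*
 * Allocate the drm_i915_private that embeds our drm_device; as a
 * device-managed allocation its lifetime follows the underlying PCI device.
 */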
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}

/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *i915;
	int ret;

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915->params.nuclear_pageflip && match_info->gen < 5)
		i915->drm.driver_features &= ~DRIVER_ATOMIC;

	/*
	 * Check if we support fake LMEM -- for now we only unleash this for
	 * the live selftests (test-and-exit).
	 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
		if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
		    i915->params.fake_lmem_start) {
			mkwrite_device_info(i915)->memory_regions =
				REGION_SMEM | REGION_LMEM | REGION_STOLEN;
			GEM_BUG_ON(!HAS_LMEM(i915));
		}
	}
#endif

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_modeset_init_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_modeset_init_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	ret = intel_modeset_init(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_modeset_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	intel_gvt_driver_remove(i915);

	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_modeset_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_modeset_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

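/*
 * Final teardown, run once the last reference to the drm_device is
 * dropped; do_release guards against probes that failed before the
 * device was fully set up.
 */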
static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	if (!dev_priv->do_release)
		return;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

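/* Invoke each encoder's optional ->suspend() hook under the modeset locks. */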
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(dev);
}

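/* Called from the PCI ->shutdown() hook to quiesce the device for reboot. */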
void i915_driver_shutdown(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_gem_suspend(i915);

	drm_kms_helper_poll_disable(&i915->drm);

	drm_atomic_helper_shutdown(&i915->drm);

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

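/* True when the platform will enter suspend-to-idle (S0ix) rather than S3. */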
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_ggtt_suspend(&dev_priv->ggtt);

	i915_save_display(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(&dev_priv->ggtt);

	intel_csr_ucode_resume(dev_priv);

	i915_restore_display(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);
	intel_hpd_init(dev_priv);

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(dev_priv);
	intel_display_resume(dev);

	intel_hpd_poll_disable(dev_priv);
	drm_kms_helper_poll_enable(dev);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	intel_gvt_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		drm_err(&dev_priv->drm,
			"Resume prepare failed: %d, continuing anyway\n", ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	intel_gt_check_and_clear_faults(&dev_priv->gt);

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

int i915_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(&i915->drm);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_gt_runtime_suspend(&dev_priv->gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_display_power_suspend(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(&dev_priv->gt);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_enable(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(&dev_priv->gt);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		intel_hpd_init(dev_priv);
		intel_hpd_poll_disable(dev_priv);
	}

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

static const struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};