i915_drv.c 52.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
D
Dave Airlie 已提交
3
/*
4
 *
L
Linus Torvalds 已提交
5 6
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
D
Dave Airlie 已提交
28
 */
L
Linus Torvalds 已提交
29

30
#include <linux/acpi.h>
31 32
#include <linux/device.h>
#include <linux/oom.h>
33
#include <linux/module.h>
34 35
#include <linux/pci.h>
#include <linux/pm.h>
36
#include <linux/pm_runtime.h>
37 38
#include <linux/pnp.h>
#include <linux/slab.h>
39
#include <linux/vga_switcheroo.h>
40 41 42
#include <linux/vt.h>
#include <acpi/video.h>

43
#include <drm/drm_atomic_helper.h>
44 45
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
46
#include <drm/drm_managed.h>
47
#include <drm/drm_probe_helper.h>
48

49 50 51 52
#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
53
#include "display/intel_csr.h"
54
#include "display/intel_display_debugfs.h"
55
#include "display/intel_display_types.h"
56
#include "display/intel_dp.h"
57 58 59 60
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
61
#include "display/intel_psr.h"
62
#include "display/intel_sprite.h"
63
#include "display/intel_vga.h"
64

65
#include "gem/i915_gem_context.h"
66
#include "gem/i915_gem_ioctls.h"
67
#include "gem/i915_gem_mman.h"
68
#include "gt/intel_gt.h"
69
#include "gt/intel_gt_pm.h"
70
#include "gt/intel_rc6.h"
71

72
#include "i915_debugfs.h"
73
#include "i915_drv.h"
74
#include "i915_ioc32.h"
75
#include "i915_irq.h"
76
#include "i915_memcpy.h"
77
#include "i915_perf.h"
L
Lionel Landwerlin 已提交
78
#include "i915_query.h"
79
#include "i915_suspend.h"
80
#include "i915_switcheroo.h"
81
#include "i915_sysfs.h"
82
#include "i915_trace.h"
83
#include "i915_vgpu.h"
84
#include "intel_dram.h"
85
#include "intel_gvt.h"
86
#include "intel_memory_region.h"
87
#include "intel_pm.h"
88
#include "vlv_suspend.h"
J
Jesse Barnes 已提交
89

90 91
static struct drm_driver driver;

92
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
93
{
94 95 96 97
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
98
	if (!dev_priv->bridge_dev) {
99
		drm_err(&dev_priv->drm, "bridge device not found\n");
100 101 102 103 104 105 106
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
107
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
108
{
109
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
110 111 112 113
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

114
	if (INTEL_GEN(dev_priv) >= 4)
115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
136
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
137 138 139 140
		dev_priv->mch_res.start = 0;
		return ret;
	}

141
	if (INTEL_GEN(dev_priv) >= 4)
142 143 144 145 146 147 148 149 150 151
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
152
intel_setup_mchbar(struct drm_i915_private *dev_priv)
153
{
154
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
155 156 157
	u32 temp;
	bool enabled;

158
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
159 160 161 162
		return;

	dev_priv->mchbar_need_disable = false;

163
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
164 165 166 167 168 169 170 171 172 173 174
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

175
	if (intel_alloc_mchbar_resource(dev_priv))
176 177 178 179 180
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
181
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
182 183 184 185 186 187 188 189 190
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
191
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
192
{
193
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
194 195

	if (dev_priv->mchbar_need_disable) {
196
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

219 220
/* part #1: call before irq install */
static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
221 222 223
{
	int ret;

224
	if (i915_inject_probe_failure(i915))
225 226
		return -ENODEV;

227 228 229
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
230
		if (ret)
231
			return ret;
232 233
	}

234
	intel_bios_init(i915);
235

236 237
	ret = intel_vga_register(i915);
	if (ret)
238
		goto cleanup_bios;
239

240
	intel_power_domains_init_hw(i915, false);
241

242
	intel_csr_ucode_init(i915);
243

244 245
	ret = intel_modeset_init_noirq(i915);
	if (ret)
246
		goto cleanup_vga_client_pw_domain_csr;
247

248 249
	return 0;

250 251 252
cleanup_vga_client_pw_domain_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
253
	intel_vga_unregister(i915);
254 255
cleanup_bios:
	intel_bios_driver_remove(i915);
256 257 258 259 260 261 262
	return ret;
}

/* part #2: call after irq install */
static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
	int ret;
263 264 265

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
266
	ret = intel_modeset_init(i915);
267
	if (ret)
268
		goto out;
269

270
	ret = i915_gem_init(i915);
271
	if (ret)
272
		goto cleanup_modeset;
273

274
	intel_overlay_setup(i915);
275

276
	if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
277 278
		return 0;

279
	ret = intel_fbdev_init(&i915->drm);
280 281 282 283
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
284
	intel_hpd_init(i915);
285

286
	intel_init_ipc(i915);
287

288 289
	intel_psr_set_force_mode_changed(i915->psr.dp);

290 291 292
	return 0;

cleanup_gem:
293 294 295
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
296
cleanup_modeset:
297
	/* FIXME */
298
	intel_modeset_driver_remove(i915);
299 300
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
301 302 303 304
out:
	return ret;
}

305
/* part #1: call before irq uninstall */
static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
	intel_modeset_driver_remove(i915);
}
310

311 312 313
/* part #2: call after irq uninstall; unwinds i915_driver_modeset_probe_noirq() */
static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
{
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}

323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342
/* Map the VLV/CHV DPIO PHYs to their sideband (IOSF) port numbers. */
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
	/* other platforms have no DPIO PHYs to map */
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
343
	 * by the GPU. i915_retire_requests() is called directly when we
344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
367
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
368 369 370 371 372 373 374 375 376 377

	return -ENOMEM;
}

/* Destroy the workqueues from i915_workqueues_init(), in reverse order. */
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

378 379 380 381
/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
382 383 384 385 386
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
387 388 389
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
390 391 392 393
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
394
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
395
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
396
	pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
397

398
	if (pre) {
399
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
400
			  "It may not be fully functional.\n");
401 402
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
403 404
}

405 406 407 408 409 410
/* Scrub leftover GPU state with a full reset, unless that would clobber the display. */
static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(&i915->gt, ALL_ENGINES);
}

411
/**
412
 * i915_driver_early_probe - setup state not requiring device access
413 414 415 416 417 418 419 420
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
421
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
422 423 424
{
	int ret = 0;

425
	if (i915_inject_probe_failure(dev_priv))
426 427
		return -ENODEV;

428 429
	intel_device_info_subplatform_init(dev_priv);

430
	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
431
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);
432

433 434 435
	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
L
Lyude 已提交
436

437
	mutex_init(&dev_priv->sb_lock);
438
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
439

440 441 442
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
443
	mutex_init(&dev_priv->hdcp_comp_mutex);
444

445
	i915_memcpy_init_early(dev_priv);
446
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);
447

448 449
	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
450
		return ret;
451

452
	ret = vlv_suspend_init(dev_priv);
453 454 455
	if (ret < 0)
		goto err_workqueues;

456 457
	intel_wopcm_init_early(&dev_priv->wopcm);

458
	intel_gt_init_early(&dev_priv->gt, dev_priv);
459

460
	i915_gem_init_early(dev_priv);
461

462
	/* This must be called before any calls to HAS_PCH_* */
463
	intel_detect_pch(dev_priv);
464

465
	intel_pm_setup(dev_priv);
466
	intel_init_dpio(dev_priv);
467 468
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
469
		goto err_gem;
470 471 472 473 474
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);

475
	intel_detect_preproduction_hw(dev_priv);
476 477 478

	return 0;

479
err_gem:
480
	i915_gem_cleanup_early(dev_priv);
481
	intel_gt_driver_late_release(&dev_priv->gt);
482
	vlv_suspend_cleanup(dev_priv);
483
err_workqueues:
484 485 486 487 488
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
489
 * i915_driver_late_release - cleanup the setup done in
490
 *			       i915_driver_early_probe()
491 492
 * @dev_priv: device private
 */
493
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
494
{
495
	intel_irq_fini(dev_priv);
496
	intel_power_domains_cleanup(dev_priv);
497
	i915_gem_cleanup_early(dev_priv);
498
	intel_gt_driver_late_release(&dev_priv->gt);
499
	vlv_suspend_cleanup(dev_priv);
500
	i915_workqueues_cleanup(dev_priv);
501

502
	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
503
	mutex_destroy(&dev_priv->sb_lock);
504 505

	i915_params_free(&dev_priv->params);
506 507 508
}

/**
509
 * i915_driver_mmio_probe - setup device MMIO
510 511 512 513 514 515 516
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
517
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
518 519 520
{
	int ret;

521
	if (i915_inject_probe_failure(dev_priv))
522 523
		return -ENODEV;

524
	if (i915_get_bridge_dev(dev_priv))
525 526
		return -EIO;

527
	ret = intel_uncore_init_mmio(&dev_priv->uncore);
528
	if (ret < 0)
529
		goto err_bridge;
530

531 532
	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);
533

534
	intel_uc_init_mmio(&dev_priv->gt.uc);
535

536
	ret = intel_engines_init_mmio(&dev_priv->gt);
537 538 539
	if (ret)
		goto err_uncore;

540 541 542
	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

543 544
	return 0;

545
err_uncore:
546
	intel_teardown_mchbar(dev_priv);
547
	intel_uncore_fini_mmio(&dev_priv->uncore);
548
err_bridge:
549 550 551 552 553 554
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
555
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
556 557
 * @dev_priv: device private
 */
558
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
559
{
560
	intel_teardown_mchbar(dev_priv);
561
	intel_uncore_fini_mmio(&dev_priv->uncore);
562 563 564
	pci_dev_put(dev_priv->bridge_dev);
}

565 566
/* Sanitize module/device parameters that depend on the probed hardware. */
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625
/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks.  The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs.  Address them as well.
 *
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	/* every platform's device info must declare a DMA mask size */
	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(i915, 2))
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	/* the (possibly narrowed) mask applies only to coherent allocations */
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

626
/**
627
 * i915_driver_hw_probe - setup state requiring device access
628 629 630 631 632
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
633
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
634
{
D
David Weinehall 已提交
635
	struct pci_dev *pdev = dev_priv->drm.pdev;
636 637
	int ret;

638
	if (i915_inject_probe_failure(dev_priv))
639 640
		return -ENODEV;

641
	intel_device_info_runtime_init(dev_priv);
642

643 644
	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
645
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
646 647 648 649 650 651
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

652 653 654 655 656 657 658 659 660 661 662 663 664 665
	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

666
	intel_sanitize_options(dev_priv);
667

668
	/* needs to be done before ggtt probe */
669
	intel_dram_edram_detect(dev_priv);
670

671 672 673 674
	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

675 676
	i915_perf_init(dev_priv);

677
	ret = i915_ggtt_probe_hw(dev_priv);
678
	if (ret)
679
		goto err_perf;
680

681 682
	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
	if (ret)
683
		goto err_ggtt;
684

685
	ret = i915_ggtt_init_hw(dev_priv);
686
	if (ret)
687
		goto err_ggtt;
688

689 690 691 692
	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

693
	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
694

695
	ret = i915_ggtt_enable_hw(dev_priv);
696
	if (ret) {
697
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
698
		goto err_mem_regions;
699 700
	}

D
David Weinehall 已提交
701
	pci_set_master(pdev);
702

703
	cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
704

705
	intel_gt_init_workarounds(dev_priv);
706 707 708 709 710 711 712 713 714

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
715 716 717 718
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
719 720 721 722 723 724
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
725
	 */
726
	if (INTEL_GEN(dev_priv) >= 5) {
D
David Weinehall 已提交
727
		if (pci_enable_msi(pdev) < 0)
728
			drm_dbg(&dev_priv->drm, "can't enable MSI");
729 730
	}

731 732
	ret = intel_gvt_init(dev_priv);
	if (ret)
733 734 735
		goto err_msi;

	intel_opregion_setup(dev_priv);
736 737 738 739
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
740
	intel_dram_detect(dev_priv);
741

742
	intel_bw_init_hw(dev_priv);
743

744 745
	return 0;

746 747 748
err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
749
	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
750 751
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
752
err_ggtt:
753
	i915_ggtt_driver_release(dev_priv);
754 755
err_perf:
	i915_perf_fini(dev_priv);
756 757 758 759
	return ret;
}

/**
760
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
761 762
 * @dev_priv: device private
 */
763
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
764
{
D
David Weinehall 已提交
765
	struct pci_dev *pdev = dev_priv->drm.pdev;
766

767 768
	i915_perf_fini(dev_priv);

D
David Weinehall 已提交
769 770
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
771

772
	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
773 774 775 776 777 778 779 780 781 782 783
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		intel_display_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	intel_gt_driver_register(&dev_priv->gt);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_regiser()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
844 845 846 847
	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

848
	intel_runtime_pm_disable(&dev_priv->runtime_pm);
849
	intel_power_domains_disable(dev_priv);
850

851
	intel_fbdev_unregister(dev_priv);
852
	intel_audio_deinit(dev_priv);
853

854 855 856 857 858 859 860
	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

861
	intel_gt_driver_unregister(&dev_priv->gt);
862 863 864
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

865
	i915_perf_unregister(dev_priv);
866
	i915_pmu_unregister(dev_priv);
867

D
David Weinehall 已提交
868
	i915_teardown_sysfs(dev_priv);
869
	drm_dev_unplug(&dev_priv->drm);
870

871
	i915_gem_driver_unregister(dev_priv);
872 873
}

874 875
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
876
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
877 878
		struct drm_printer p = drm_debug_printer("i915 device info:");

879
		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
880 881 882
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
883 884
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
885 886
			   INTEL_GEN(dev_priv));

887 888
		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
889 890 891
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
892
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
893
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
894
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
895
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
896 897
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
898 899
}

900 901 902 903 904 905 906 907
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;

D
Daniel Vetter 已提交
908 909 910 911
	i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;
912

913 914
	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);
915

916 917 918
	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

919 920 921
	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
922
	RUNTIME_INFO(i915)->device_id = pdev->device;
923

924
	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
925 926 927 928

	return i915;
}

929
/**
930
 * i915_driver_probe - setup chip and create an initial config
931 932
 * @pdev: PCI device
 * @ent: matching PCI ID entry
933
 *
934
 * The driver probe routine has to do several things:
935 936 937 938 939
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
940
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
941
{
942 943
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
944
	struct drm_i915_private *i915;
945
	int ret;
946

947 948 949
	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);
950

951
	/* Disable nuclear pageflip by default on pre-ILK */
952
	if (!i915->params.nuclear_pageflip && match_info->gen < 5)
953
		i915->drm.driver_features &= ~DRIVER_ATOMIC;
954

955 956 957 958
	/*
	 * Check if we support fake LMEM -- for now we only unleash this for
	 * the live selftests(test-and-exit).
	 */
959
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
960
	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
961
		if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
962
		    i915->params.fake_lmem_start) {
963
			mkwrite_device_info(i915)->memory_regions =
964
				REGION_SMEM | REGION_LMEM | REGION_STOLEN;
965 966 967
			mkwrite_device_info(i915)->is_dgfx = true;
			GEM_BUG_ON(!HAS_LMEM(i915));
			GEM_BUG_ON(!IS_DGFX(i915));
968 969
		}
	}
970
#endif
971

972 973
	ret = pci_enable_device(pdev);
	if (ret)
974
		goto out_fini;
D
Damien Lespiau 已提交
975

976
	ret = i915_driver_early_probe(i915);
977 978
	if (ret < 0)
		goto out_pci_disable;
979

980
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
L
Linus Torvalds 已提交
981

982
	intel_vgpu_detect(i915);
983

984
	ret = i915_driver_mmio_probe(i915);
985 986
	if (ret < 0)
		goto out_runtime_pm_put;
J
Jesse Barnes 已提交
987

988
	ret = i915_driver_hw_probe(i915);
989 990
	if (ret < 0)
		goto out_cleanup_mmio;
991

992
	ret = i915_driver_modeset_probe_noirq(i915);
993
	if (ret < 0)
994
		goto out_cleanup_hw;
995

996 997 998 999 1000 1001 1002 1003
	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = i915_driver_modeset_probe(i915);
	if (ret < 0)
		goto out_cleanup_irq;

1004
	i915_driver_register(i915);
1005

1006
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1007

1008
	i915_welcome_messages(i915);
1009

1010 1011
	i915->do_release = true;

1012 1013
	return 0;

1014 1015 1016
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
1017
	i915_driver_modeset_remove_noirq(i915);
1018
out_cleanup_hw:
1019 1020 1021
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
1022
out_cleanup_mmio:
1023
	i915_driver_mmio_release(i915);
1024
out_runtime_pm_put:
1025 1026
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
1027 1028
out_pci_disable:
	pci_disable_device(pdev);
1029
out_fini:
1030
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
1031 1032 1033
	return ret;
}

1034
void i915_driver_remove(struct drm_i915_private *i915)
1035
{
1036
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
1037

1038
	i915_driver_unregister(i915);
1039

1040 1041 1042
	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

1043
	i915_gem_suspend(i915);
B
Ben Widawsky 已提交
1044

1045
	drm_atomic_helper_shutdown(&i915->drm);
1046

1047
	intel_gvt_driver_remove(i915);
1048

1049
	i915_driver_modeset_remove(i915);
1050

1051 1052
	intel_irq_uninstall(i915);

1053
	intel_modeset_driver_remove_noirq(i915);
1054

1055 1056
	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);
1057

1058
	i915_driver_modeset_remove_noirq(i915);
1059

1060
	i915_driver_hw_remove(i915);
1061

1062
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1063 1064 1065 1066 1067
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
1068
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1069

1070 1071 1072
	if (!dev_priv->do_release)
		return;

1073
	disable_rpm_wakeref_asserts(rpm);
1074

1075
	i915_gem_driver_release(dev_priv);
1076

1077
	intel_memory_regions_driver_release(dev_priv);
1078
	i915_ggtt_driver_release(dev_priv);
1079

1080
	i915_driver_mmio_release(dev_priv);
1081

1082
	enable_rpm_wakeref_asserts(rpm);
1083
	intel_runtime_pm_driver_release(rpm);
1084

1085
	i915_driver_late_release(dev_priv);
1086 1087
}

1088
/* Per-fd open hook: set up the GEM file-private state for a new client. */
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}
1099

1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}
1117

1118
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1119
{
1120 1121
	struct drm_i915_file_private *file_priv = file->driver_priv;

1122
	i915_gem_context_close(file);
1123 1124
	i915_gem_release(dev, file);

1125
	kfree_rcu(file_priv, rcu);
1126 1127 1128

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
1129 1130
}

1131 1132
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
1133
	struct drm_device *dev = &dev_priv->drm;
1134
	struct intel_encoder *encoder;
1135 1136

	drm_modeset_lock_all(dev);
1137 1138 1139
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
1140 1141 1142
	drm_modeset_unlock_all(dev);
}

1143 1144 1145 1146 1147 1148 1149 1150
/*
 * Report whether the pending system sleep targets s2idle (S0ix) rather
 * than a deeper state such as S3. Without ACPI sleep support we can never
 * be suspending to idle.
 */
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	return acpi_target_system_state() < ACPI_STATE_S3;
#else
	return false;
#endif
}
1151

1152 1153 1154 1155 1156 1157 1158 1159 1160 1161
/* PM .prepare stage: park the GPU before the suspend sequence proper. */
static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}

1167
static int i915_drm_suspend(struct drm_device *dev)
J
Jesse Barnes 已提交
1168
{
1169
	struct drm_i915_private *dev_priv = to_i915(dev);
D
David Weinehall 已提交
1170
	struct pci_dev *pdev = dev_priv->drm.pdev;
1171
	pci_power_t opregion_target_state;
1172

1173
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1174

1175 1176
	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
1177
	intel_power_domains_disable(dev_priv);
1178

1179 1180
	drm_kms_helper_poll_disable(dev);

D
David Weinehall 已提交
1181
	pci_save_state(pdev);
J
Jesse Barnes 已提交
1182

1183
	intel_display_suspend(dev);
1184

1185
	intel_dp_mst_suspend(dev_priv);
1186

1187 1188
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);
1189

1190
	intel_suspend_encoders(dev_priv);
1191

1192
	intel_suspend_hw(dev_priv);
1193

1194
	i915_ggtt_suspend(&dev_priv->ggtt);
1195

1196
	i915_save_state(dev_priv);
1197

1198
	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
1199
	intel_opregion_suspend(dev_priv, opregion_target_state);
1200

1201
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
1202

1203 1204
	dev_priv->suspend_count++;

1205
	intel_csr_ucode_suspend(dev_priv);
1206

1207
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1208

1209
	return 0;
1210 1211
}

1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223
/* Map the requested system transition onto our internal suspend mode. */
static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	return suspend_to_idle(dev_priv) ? I915_DRM_SUSPEND_IDLE :
					   I915_DRM_SUSPEND_MEM;
}

1224
/*
 * Second phase of system suspend: suspend the uncore and power domains,
 * then actually power the PCI device down (unless the pre-GEN6 hibernate
 * workaround applies). @hibernation selects the S4 path.
 */
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

1274
/*
 * vga_switcheroo suspend entry point: run both suspend phases back to back.
 * Only PM_EVENT_SUSPEND/PM_EVENT_FREEZE are valid; a switched-off device
 * is a no-op.
 */
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

1292
static int i915_drm_resume(struct drm_device *dev)
1293
{
1294
	struct drm_i915_private *dev_priv = to_i915(dev);
1295
	int ret;
1296

1297
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1298

1299 1300
	sanitize_gpu(dev_priv);

1301
	ret = i915_ggtt_enable_hw(dev_priv);
1302
	if (ret)
1303
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
1304

1305
	i915_ggtt_resume(&dev_priv->ggtt);
1306

1307 1308
	intel_csr_ucode_resume(dev_priv);

1309
	i915_restore_state(dev_priv);
1310
	intel_pps_unlock_regs_wa(dev_priv);
1311

1312
	intel_init_pch_refclk(dev_priv);
1313

1314 1315 1316 1317 1318
	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
1319 1320
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
1321 1322 1323 1324 1325
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

1326 1327
	drm_mode_config_reset(dev);

1328
	i915_gem_resume(dev_priv);
1329

1330
	intel_modeset_init_hw(dev_priv);
1331
	intel_init_clock_gating(dev_priv);
1332

1333 1334
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
1335
		dev_priv->display.hpd_irq_setup(dev_priv);
1336
	spin_unlock_irq(&dev_priv->irq_lock);
1337

1338
	intel_dp_mst_resume(dev_priv);
1339

1340 1341
	intel_display_resume(dev);

1342 1343
	drm_kms_helper_poll_enable(dev);

1344 1345 1346
	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
1347
	 * bother with the tiny race here where we might lose hotplug
1348 1349 1350
	 * notifications.
	 * */
	intel_hpd_init(dev_priv);
1351

1352
	intel_opregion_resume(dev_priv);
1353

1354
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
1355

1356 1357
	intel_power_domains_enable(dev_priv);

1358
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1359

1360
	return 0;
1361 1362
}

1363
static int i915_drm_resume_early(struct drm_device *dev)
1364
{
1365
	struct drm_i915_private *dev_priv = to_i915(dev);
D
David Weinehall 已提交
1366
	struct pci_dev *pdev = dev_priv->drm.pdev;
1367
	int ret;
1368

1369 1370 1371 1372 1373 1374 1375 1376 1377
	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
D
David Weinehall 已提交
1389
	ret = pci_set_power_state(pdev, PCI_D0);
1390
	if (ret) {
1391 1392
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
1393
		return ret;
1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
1409 1410
	if (pci_enable_device(pdev))
		return -EIO;
1411

D
David Weinehall 已提交
1412
	pci_set_master(pdev);
1413

1414
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1415

1416
	ret = vlv_resume_prepare(dev_priv, false);
1417
	if (ret)
1418
		drm_err(&dev_priv->drm,
1419
			"Resume prepare failed: %d, continuing anyway\n", ret);
1420

1421 1422
	intel_uncore_resume_early(&dev_priv->uncore);

1423
	intel_gt_check_and_clear_faults(&dev_priv->gt);
1424

1425
	intel_display_power_resume_early(dev_priv);
1426

1427
	intel_power_domains_resume(dev_priv);
1428

1429
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1430

1431
	return ret;
1432 1433
}

1434
int i915_resume_switcheroo(struct drm_i915_private *i915)
1435
{
1436
	int ret;
1437

1438
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1439 1440
		return 0;

1441
	ret = i915_drm_resume_early(&i915->drm);
1442 1443 1444
	if (ret)
		return ret;

1445
	return i915_drm_resume(&i915->drm);
1446 1447
}

1448 1449
static int i915_pm_prepare(struct device *kdev)
{
1450
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1451

1452
	if (!i915) {
1453 1454 1455 1456
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

1457
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1458 1459
		return 0;

1460
	return i915_drm_prepare(&i915->drm);
1461 1462
}

1463
static int i915_pm_suspend(struct device *kdev)
1464
{
1465
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1466

1467
	if (!i915) {
1468
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
1469 1470
		return -ENODEV;
	}
1471

1472
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1473 1474
		return 0;

1475
	return i915_drm_suspend(&i915->drm);
1476 1477
}

1478
static int i915_pm_suspend_late(struct device *kdev)
1479
{
1480
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1481 1482

	/*
D
Damien Lespiau 已提交
1483
	 * We have a suspend ordering issue with the snd-hda driver also
1484 1485 1486 1487 1488 1489 1490
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
1491
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1492
		return 0;
1493

1494
	return i915_drm_suspend_late(&i915->drm, false);
1495 1496
}

1497
static int i915_pm_poweroff_late(struct device *kdev)
1498
{
1499
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1500

1501
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1502 1503
		return 0;

1504
	return i915_drm_suspend_late(&i915->drm, true);
1505 1506
}

1507
static int i915_pm_resume_early(struct device *kdev)
1508
{
1509
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1510

1511
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1512 1513
		return 0;

1514
	return i915_drm_resume_early(&i915->drm);
1515 1516
}

1517
static int i915_pm_resume(struct device *kdev)
1518
{
1519
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1520

1521
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1522 1523
		return 0;

1524
	return i915_drm_resume(&i915->drm);
1525 1526
}

1527
/* freeze: before creating the hibernation_image */
1528
static int i915_pm_freeze(struct device *kdev)
1529
{
1530
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1531 1532
	int ret;

1533 1534
	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
1535 1536 1537
		if (ret)
			return ret;
	}
1538

1539
	ret = i915_gem_freeze(i915);
1540 1541 1542 1543
	if (ret)
		return ret;

	return 0;
1544 1545
}

1546
static int i915_pm_freeze_late(struct device *kdev)
1547
{
1548
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1549 1550
	int ret;

1551 1552
	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
1553 1554 1555
		if (ret)
			return ret;
	}
1556

1557
	ret = i915_gem_freeze_late(i915);
1558 1559 1560 1561
	if (ret)
		return ret;

	return 0;
1562 1563 1564
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

1570
/* thaw: second stage, identical to a normal resume. */
static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

1581
/* restore: second stage, identical to a normal resume. */
static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

1586
static int intel_runtime_suspend(struct device *kdev)
1587
{
1588
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1589
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1590
	int ret;
1591

1592
	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
1593 1594
		return -ENODEV;

1595
	drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
1596

1597
	disable_rpm_wakeref_asserts(rpm);
1598

1599 1600 1601 1602
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
1603
	i915_gem_runtime_suspend(dev_priv);
1604

1605
	intel_gt_runtime_suspend(&dev_priv->gt);
1606

1607
	intel_runtime_pm_disable_interrupts(dev_priv);
1608

1609
	intel_uncore_suspend(&dev_priv->uncore);
1610

1611 1612
	intel_display_power_suspend(dev_priv);

1613
	ret = vlv_suspend_complete(dev_priv);
1614
	if (ret) {
1615 1616
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
1617
		intel_uncore_runtime_resume(&dev_priv->uncore);
1618

1619
		intel_runtime_pm_enable_interrupts(dev_priv);
1620

1621
		intel_gt_runtime_resume(&dev_priv->gt);
1622

1623
		enable_rpm_wakeref_asserts(rpm);
1624

1625 1626
		return ret;
	}
1627

1628
	enable_rpm_wakeref_asserts(rpm);
1629
	intel_runtime_pm_driver_release(rpm);
1630

1631
	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
1632 1633
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");
1634

1635
	rpm->suspended = true;
1636 1637

	/*
1638 1639
	 * FIXME: We really should find a document that references the arguments
	 * used below!
1640
	 */
1641
	if (IS_BROADWELL(dev_priv)) {
1642 1643 1644 1645 1646 1647
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
1648
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
1649
	} else {
1650 1651 1652 1653 1654 1655 1656
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
1657
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
1658
	}
1659

1660
	assert_forcewakes_inactive(&dev_priv->uncore);
1661

1662
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1663 1664
		intel_hpd_poll_init(dev_priv);

1665
	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
1666 1667 1668
	return 0;
}

1669
static int intel_runtime_resume(struct device *kdev)
1670
{
1671
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1672
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1673
	int ret;
1674

1675
	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
1676
		return -ENODEV;
1677

1678
	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
1679

1680
	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
1681
	disable_rpm_wakeref_asserts(rpm);
1682

1683
	intel_opregion_notify_adapter(dev_priv, PCI_D0);
1684
	rpm->suspended = false;
1685
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
1686 1687
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");
1688

1689 1690
	intel_display_power_resume(dev_priv);

1691
	ret = vlv_resume_prepare(dev_priv, true);
1692

1693
	intel_uncore_runtime_resume(&dev_priv->uncore);
1694

1695 1696
	intel_runtime_pm_enable_interrupts(dev_priv);

1697 1698 1699 1700
	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
1701
	intel_gt_runtime_resume(&dev_priv->gt);
1702

1703 1704 1705 1706 1707
	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
1708
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1709 1710
		intel_hpd_init(dev_priv);

1711 1712
	intel_enable_ipc(dev_priv);

1713
	enable_rpm_wakeref_asserts(rpm);
1714

1715
	if (ret)
1716 1717
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
1718
	else
1719
		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
1720 1721

	return ret;
1722 1723
}

1724
const struct dev_pm_ops i915_pm_ops = {
1725 1726 1727 1728
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
1729
	.prepare = i915_pm_prepare,
1730
	.suspend = i915_pm_suspend,
1731 1732
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
1733
	.resume = i915_pm_resume,
1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
1750 1751 1752 1753
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
1754
	.poweroff = i915_pm_suspend,
1755
	.poweroff_late = i915_pm_poweroff_late,
1756 1757
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,
1758 1759

	/* S0ix (via runtime suspend) event handlers */
1760 1761
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
1762 1763
};

1764 1765 1766
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
1767
	.release = drm_release_noglobal,
1768
	.unlocked_ioctl = drm_ioctl,
1769
	.mmap = i915_gem_mmap,
1770 1771
	.poll = drm_poll,
	.read = drm_read,
1772
	.compat_ioctl = i915_ioc32_compat_ioctl,
1773 1774 1775
	.llseek = noop_llseek,
};

1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789
/* The pin/unpin ioctls are no longer supported; always reject them. */
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
1790
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1802
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
1803
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
1804 1805
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1806
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
1807 1808
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
1809
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
1810 1811 1812 1813 1814 1815
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
1816
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
1817 1818
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
1819 1820
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
1821
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1822
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
1823
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
D
Daniel Vetter 已提交
1824 1825 1826 1827
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
1828
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
1829
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1830 1831 1832 1833 1834 1835
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
1836
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
1837 1838 1839
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
1840 1841
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
1842 1843
};

static struct drm_driver driver = {
1845 1846
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
D
Dave Airlie 已提交
1847
	 */
1848
	.driver_features =
1849
	    DRIVER_GEM |
1850
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
1851
	.release = i915_driver_release,
1852
	.open = i915_driver_open,
1853
	.lastclose = i915_driver_lastclose,
1854
	.postclose = i915_driver_postclose,
1855

1856
	.gem_close_object = i915_gem_close_object,
C
Chris Wilson 已提交
1857
	.gem_free_object_unlocked = i915_gem_free_object,
1858 1859 1860 1861 1862 1863

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

1864
	.dumb_create = i915_gem_dumb_create,
1865 1866
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

L
Linus Torvalds 已提交
1867
	.ioctls = i915_ioctls,
1868
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
1869
	.fops = &i915_driver_fops,
1870 1871 1872 1873 1874 1875
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
L
Linus Torvalds 已提交
1876
};