i915_drv.c 52.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
D
Dave Airlie 已提交
3
/*
4
 *
L
Linus Torvalds 已提交
5 6
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
D
Dave Airlie 已提交
28
 */
L
Linus Torvalds 已提交
29

30
#include <linux/acpi.h>
31 32
#include <linux/device.h>
#include <linux/oom.h>
33
#include <linux/module.h>
34 35
#include <linux/pci.h>
#include <linux/pm.h>
36
#include <linux/pm_runtime.h>
37 38
#include <linux/pnp.h>
#include <linux/slab.h>
39
#include <linux/vga_switcheroo.h>
40 41 42
#include <linux/vt.h>
#include <acpi/video.h>

43
#include <drm/drm_atomic_helper.h>
44 45
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
46
#include <drm/drm_managed.h>
47
#include <drm/drm_probe_helper.h>
48

49 50 51 52
#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
53
#include "display/intel_csr.h"
54
#include "display/intel_display_debugfs.h"
55
#include "display/intel_display_types.h"
56
#include "display/intel_dp.h"
57 58 59 60
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
61
#include "display/intel_psr.h"
62
#include "display/intel_sprite.h"
63
#include "display/intel_vga.h"
64

65
#include "gem/i915_gem_context.h"
66
#include "gem/i915_gem_ioctls.h"
67
#include "gem/i915_gem_mman.h"
68
#include "gt/intel_gt.h"
69
#include "gt/intel_gt_pm.h"
70
#include "gt/intel_rc6.h"
71

72
#include "i915_debugfs.h"
73
#include "i915_drv.h"
74
#include "i915_ioc32.h"
75
#include "i915_irq.h"
76
#include "i915_memcpy.h"
77
#include "i915_perf.h"
L
Lionel Landwerlin 已提交
78
#include "i915_query.h"
79
#include "i915_suspend.h"
80
#include "i915_switcheroo.h"
81
#include "i915_sysfs.h"
82
#include "i915_trace.h"
83
#include "i915_vgpu.h"
84
#include "intel_dram.h"
85
#include "intel_gvt.h"
86
#include "intel_memory_region.h"
87
#include "intel_pm.h"
88
#include "vlv_suspend.h"
J
Jesse Barnes 已提交
89

90 91
static struct drm_driver driver;

92
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
93
{
94 95 96 97
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
98
	if (!dev_priv->bridge_dev) {
99
		drm_err(&dev_priv->drm, "bridge device not found\n");
100 101 102 103 104 105 106
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
107
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
108
{
109
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
110 111 112 113
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

114
	if (INTEL_GEN(dev_priv) >= 4)
115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
136
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
137 138 139 140
		dev_priv->mch_res.start = 0;
		return ret;
	}

141
	if (INTEL_GEN(dev_priv) >= 4)
142 143 144 145 146 147 148 149 150 151
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
152
intel_setup_mchbar(struct drm_i915_private *dev_priv)
153
{
154
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
155 156 157
	u32 temp;
	bool enabled;

158
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
159 160 161 162
		return;

	dev_priv->mchbar_need_disable = false;

163
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
164 165 166 167 168 169 170 171 172 173 174
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

175
	if (intel_alloc_mchbar_resource(dev_priv))
176 177 178 179 180
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
181
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
182 183 184 185 186 187 188 189 190
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
191
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
192
{
193
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
194 195

	if (dev_priv->mchbar_need_disable) {
196
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

219 220
/* part #1: call before irq install */
static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
221 222 223
{
	int ret;

224
	if (i915_inject_probe_failure(i915))
225 226
		return -ENODEV;

227 228 229
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
230
		if (ret)
231
			return ret;
232 233
	}

234
	intel_bios_init(i915);
235

236 237
	ret = intel_vga_register(i915);
	if (ret)
238
		goto cleanup_bios;
239

240
	intel_power_domains_init_hw(i915, false);
241

242
	intel_csr_ucode_init(i915);
243

244 245
	ret = intel_modeset_init_noirq(i915);
	if (ret)
246
		goto cleanup_vga_client_pw_domain_csr;
247

248 249
	return 0;

250 251 252
cleanup_vga_client_pw_domain_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
253
	intel_vga_unregister(i915);
254 255
cleanup_bios:
	intel_bios_driver_remove(i915);
256 257 258 259 260 261 262
	return ret;
}

/* part #2: call after irq install */
static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
	int ret;
263 264 265

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
266
	ret = intel_modeset_init(i915);
267
	if (ret)
268
		goto out;
269

270
	ret = i915_gem_init(i915);
271
	if (ret)
272
		goto cleanup_modeset;
273

274
	intel_overlay_setup(i915);
275

276
	if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
277 278
		return 0;

279
	ret = intel_fbdev_init(&i915->drm);
280 281 282 283
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
284
	intel_hpd_init(i915);
285

286
	intel_init_ipc(i915);
287

288 289
	intel_psr_set_force_mode_changed(i915->psr.dp);

290 291 292
	return 0;

cleanup_gem:
293 294 295
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
296
cleanup_modeset:
297
	/* FIXME */
298
	intel_modeset_driver_remove(i915);
299 300
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
301 302 303 304
out:
	return ret;
}

305
/* part #1: call before irq uninstall */
static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
	intel_modeset_driver_remove(i915);
}
310

311 312 313
/* part #2: call after irq uninstall; mirrors i915_driver_modeset_probe_noirq() */
static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
{
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}

323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
343
	 * by the GPU. i915_retire_requests() is called directly when we
344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
367
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
368 369 370 371 372 373 374 375 376 377

	return -ENOMEM;
}

/* Destroy the workqueues in reverse order of i915_workqueues_init(). */
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

378 379 380 381
/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
382 383 384 385 386
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
387 388 389
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
390 391 392 393
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
394
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
395
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
396
	pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
397

398
	if (pre) {
399
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
400
			  "It may not be fully functional.\n");
401 402
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
403 404
}

405 406 407 408 409 410
/* Reset the GPU on load, but only when that won't clobber the display state. */
static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(&i915->gt, ALL_ENGINES);
}

411
/**
412
 * i915_driver_early_probe - setup state not requiring device access
413 414 415 416 417 418 419 420
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
421
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
422 423 424
{
	int ret = 0;

425
	if (i915_inject_probe_failure(dev_priv))
426 427
		return -ENODEV;

428 429
	intel_device_info_subplatform_init(dev_priv);

430
	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
431
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);
432

433 434 435
	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
L
Lyude 已提交
436

437
	mutex_init(&dev_priv->sb_lock);
438
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
439

440 441 442
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
443
	mutex_init(&dev_priv->hdcp_comp_mutex);
444

445
	i915_memcpy_init_early(dev_priv);
446
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);
447

448 449
	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
450
		return ret;
451

452
	ret = vlv_suspend_init(dev_priv);
453 454 455
	if (ret < 0)
		goto err_workqueues;

456 457
	intel_wopcm_init_early(&dev_priv->wopcm);

458
	intel_gt_init_early(&dev_priv->gt, dev_priv);
459

460
	i915_gem_init_early(dev_priv);
461

462
	/* This must be called before any calls to HAS_PCH_* */
463
	intel_detect_pch(dev_priv);
464

465
	intel_pm_setup(dev_priv);
466
	intel_init_dpio(dev_priv);
467 468
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
469
		goto err_gem;
470 471 472 473 474
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);

475
	intel_detect_preproduction_hw(dev_priv);
476 477 478

	return 0;

479
err_gem:
480
	i915_gem_cleanup_early(dev_priv);
481
	intel_gt_driver_late_release(&dev_priv->gt);
482
	vlv_suspend_cleanup(dev_priv);
483
err_workqueues:
484 485 486 487 488
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
489
 * i915_driver_late_release - cleanup the setup done in
490
 *			       i915_driver_early_probe()
491 492
 * @dev_priv: device private
 */
493
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
494
{
495
	intel_irq_fini(dev_priv);
496
	intel_power_domains_cleanup(dev_priv);
497
	i915_gem_cleanup_early(dev_priv);
498
	intel_gt_driver_late_release(&dev_priv->gt);
499
	vlv_suspend_cleanup(dev_priv);
500
	i915_workqueues_cleanup(dev_priv);
501

502
	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
503
	mutex_destroy(&dev_priv->sb_lock);
504 505

	i915_params_free(&dev_priv->params);
506 507 508
}

/**
509
 * i915_driver_mmio_probe - setup device MMIO
510 511 512 513 514 515 516
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
517
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
518 519 520
{
	int ret;

521
	if (i915_inject_probe_failure(dev_priv))
522 523
		return -ENODEV;

524
	if (i915_get_bridge_dev(dev_priv))
525 526
		return -EIO;

527
	ret = intel_uncore_init_mmio(&dev_priv->uncore);
528
	if (ret < 0)
529
		goto err_bridge;
530

531 532
	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);
533

534
	ret = intel_gt_init_mmio(&dev_priv->gt);
535 536 537
	if (ret)
		goto err_uncore;

538 539 540
	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

541 542
	return 0;

543
err_uncore:
544
	intel_teardown_mchbar(dev_priv);
545
	intel_uncore_fini_mmio(&dev_priv->uncore);
546
err_bridge:
547 548 549 550 551 552
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
553
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
554 555
 * @dev_priv: device private
 */
556
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
557
{
558
	intel_teardown_mchbar(dev_priv);
559
	intel_uncore_fini_mmio(&dev_priv->uncore);
560 561 562
	pci_dev_put(dev_priv->bridge_dev);
}

563 564
/* Sanitize module options that depend on runtime-detected capabilities. */
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623
/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks.  The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs.  Address them as well.
 *
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(i915, 2))
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

624
/**
625
 * i915_driver_hw_probe - setup state requiring device access
626 627 628 629 630
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
631
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
632
{
D
David Weinehall 已提交
633
	struct pci_dev *pdev = dev_priv->drm.pdev;
634 635
	int ret;

636
	if (i915_inject_probe_failure(dev_priv))
637 638
		return -ENODEV;

639
	intel_device_info_runtime_init(dev_priv);
640

641 642
	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
643
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
644 645 646 647 648 649
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

650 651 652 653 654 655 656 657 658 659 660 661 662 663
	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

664
	intel_sanitize_options(dev_priv);
665

666
	/* needs to be done before ggtt probe */
667
	intel_dram_edram_detect(dev_priv);
668

669 670 671 672
	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

673 674
	i915_perf_init(dev_priv);

675
	ret = i915_ggtt_probe_hw(dev_priv);
676
	if (ret)
677
		goto err_perf;
678

679 680
	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
	if (ret)
681
		goto err_ggtt;
682

683
	ret = i915_ggtt_init_hw(dev_priv);
684
	if (ret)
685
		goto err_ggtt;
686

687 688 689 690
	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

691
	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
692

693
	ret = i915_ggtt_enable_hw(dev_priv);
694
	if (ret) {
695
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
696
		goto err_mem_regions;
697 698
	}

D
David Weinehall 已提交
699
	pci_set_master(pdev);
700

701
	cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
702

703
	intel_gt_init_workarounds(dev_priv);
704 705 706 707 708 709 710 711 712

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
713 714 715 716
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
717 718 719 720 721 722
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
723
	 */
724
	if (INTEL_GEN(dev_priv) >= 5) {
D
David Weinehall 已提交
725
		if (pci_enable_msi(pdev) < 0)
726
			drm_dbg(&dev_priv->drm, "can't enable MSI");
727 728
	}

729 730
	ret = intel_gvt_init(dev_priv);
	if (ret)
731 732 733
		goto err_msi;

	intel_opregion_setup(dev_priv);
734 735 736 737
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
738
	intel_dram_detect(dev_priv);
739

740
	intel_bw_init_hw(dev_priv);
741

742 743
	return 0;

744 745 746
err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
747
	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
748 749
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
750
err_ggtt:
751
	i915_ggtt_driver_release(dev_priv);
752 753
err_perf:
	i915_perf_fini(dev_priv);
754 755 756 757
	return ret;
}

/**
758
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
759 760
 * @dev_priv: device private
 */
761
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
762
{
D
David Weinehall 已提交
763
	struct pci_dev *pdev = dev_priv->drm.pdev;
764

765 766
	i915_perf_fini(dev_priv);

D
David Weinehall 已提交
767 768
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
769

770
	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
771 772 773 774 775 776 777 778 779 780 781
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		intel_display_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	intel_gt_driver_register(&dev_priv->gt);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_regiser()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
842 843 844 845
	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

846
	intel_runtime_pm_disable(&dev_priv->runtime_pm);
847
	intel_power_domains_disable(dev_priv);
848

849
	intel_fbdev_unregister(dev_priv);
850
	intel_audio_deinit(dev_priv);
851

852 853 854 855 856 857 858
	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

859
	intel_gt_driver_unregister(&dev_priv->gt);
860 861 862
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

863
	i915_perf_unregister(dev_priv);
864
	i915_pmu_unregister(dev_priv);
865

D
David Weinehall 已提交
866
	i915_teardown_sysfs(dev_priv);
867
	drm_dev_unplug(&dev_priv->drm);
868

869
	i915_gem_driver_unregister(dev_priv);
870 871
}

872 873
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
874
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
875 876
		struct drm_printer p = drm_debug_printer("i915 device info:");

877
		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
878 879 880
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
881 882
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
883 884
			   INTEL_GEN(dev_priv));

885 886
		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
887
		intel_gt_info_print(&dev_priv->gt.info, &p);
888 889 890
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
891
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
892
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
893
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
894
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
895 896
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
897 898
}

899 900 901 902 903 904 905 906
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;

D
Daniel Vetter 已提交
907 908 909 910
	i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;
911

912 913
	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);
914

915 916 917
	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

918 919 920
	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
921
	RUNTIME_INFO(i915)->device_id = pdev->device;
922

923
	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
924 925 926 927

	return i915;
}

928
/**
929
 * i915_driver_probe - setup chip and create an initial config
930 931
 * @pdev: PCI device
 * @ent: matching PCI ID entry
932
 *
933
 * The driver probe routine has to do several things:
934 935 936 937 938
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
939
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
940
{
941 942
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
943
	struct drm_i915_private *i915;
944
	int ret;
945

946 947 948
	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);
949

950
	/* Disable nuclear pageflip by default on pre-ILK */
951
	if (!i915->params.nuclear_pageflip && match_info->gen < 5)
952
		i915->drm.driver_features &= ~DRIVER_ATOMIC;
953

954 955 956 957
	/*
	 * Check if we support fake LMEM -- for now we only unleash this for
	 * the live selftests(test-and-exit).
	 */
958
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
959
	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
960
		if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
961
		    i915->params.fake_lmem_start) {
962
			mkwrite_device_info(i915)->memory_regions =
963
				REGION_SMEM | REGION_LMEM | REGION_STOLEN;
964 965 966
			mkwrite_device_info(i915)->is_dgfx = true;
			GEM_BUG_ON(!HAS_LMEM(i915));
			GEM_BUG_ON(!IS_DGFX(i915));
967 968
		}
	}
969
#endif
970

971 972
	ret = pci_enable_device(pdev);
	if (ret)
973
		goto out_fini;
D
Damien Lespiau 已提交
974

975
	ret = i915_driver_early_probe(i915);
976 977
	if (ret < 0)
		goto out_pci_disable;
978

979
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
L
Linus Torvalds 已提交
980

981
	intel_vgpu_detect(i915);
982

983
	ret = i915_driver_mmio_probe(i915);
984 985
	if (ret < 0)
		goto out_runtime_pm_put;
J
Jesse Barnes 已提交
986

987
	ret = i915_driver_hw_probe(i915);
988 989
	if (ret < 0)
		goto out_cleanup_mmio;
990

991
	ret = i915_driver_modeset_probe_noirq(i915);
992
	if (ret < 0)
993
		goto out_cleanup_hw;
994

995 996 997 998 999 1000 1001 1002
	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = i915_driver_modeset_probe(i915);
	if (ret < 0)
		goto out_cleanup_irq;

1003
	i915_driver_register(i915);
1004

1005
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1006

1007
	i915_welcome_messages(i915);
1008

1009 1010
	i915->do_release = true;

1011 1012
	return 0;

1013 1014 1015
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
1016
	i915_driver_modeset_remove_noirq(i915);
1017
out_cleanup_hw:
1018 1019 1020
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
1021
out_cleanup_mmio:
1022
	i915_driver_mmio_release(i915);
1023
out_runtime_pm_put:
1024 1025
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
1026 1027
out_pci_disable:
	pci_disable_device(pdev);
1028
out_fini:
1029
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
1030 1031 1032
	return ret;
}

1033
void i915_driver_remove(struct drm_i915_private *i915)
1034
{
1035
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
1036

1037
	i915_driver_unregister(i915);
1038

1039 1040 1041
	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

1042
	i915_gem_suspend(i915);
B
Ben Widawsky 已提交
1043

1044
	drm_atomic_helper_shutdown(&i915->drm);
1045

1046
	intel_gvt_driver_remove(i915);
1047

1048
	i915_driver_modeset_remove(i915);
1049

1050 1051
	intel_irq_uninstall(i915);

1052
	intel_modeset_driver_remove_noirq(i915);
1053

1054 1055
	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);
1056

1057
	i915_driver_modeset_remove_noirq(i915);
1058

1059
	i915_driver_hw_remove(i915);
1060

1061
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1062 1063 1064 1065 1066
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
1067
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1068

1069 1070 1071
	if (!dev_priv->do_release)
		return;

1072
	disable_rpm_wakeref_asserts(rpm);
1073

1074
	i915_gem_driver_release(dev_priv);
1075

1076
	intel_memory_regions_driver_release(dev_priv);
1077
	i915_ggtt_driver_release(dev_priv);
1078

1079
	i915_driver_mmio_release(dev_priv);
1080

1081
	enable_rpm_wakeref_asserts(rpm);
1082
	intel_runtime_pm_driver_release(rpm);
1083

1084
	i915_driver_late_release(dev_priv);
1085 1086
}

/*
 * i915_driver_open - drm_driver open callback
 * @dev: the DRM device
 * @file: the newly opened DRM file
 *
 * Sets up the per-client GEM state; any error is returned to the opener.
 */
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

1117
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1118
{
1119 1120
	struct drm_i915_file_private *file_priv = file->driver_priv;

1121
	i915_gem_context_close(file);
1122 1123
	i915_gem_release(dev, file);

1124
	kfree_rcu(file_priv, rcu);
1125 1126 1127

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
1128 1129
}

1130 1131
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
1132
	struct drm_device *dev = &dev_priv->drm;
1133
	struct intel_encoder *encoder;
1134 1135

	drm_modeset_lock_all(dev);
1136 1137 1138
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
1139 1140 1141
	drm_modeset_unlock_all(dev);
}

1142 1143 1144 1145 1146 1147 1148 1149
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
1150

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}

1166
static int i915_drm_suspend(struct drm_device *dev)
J
Jesse Barnes 已提交
1167
{
1168
	struct drm_i915_private *dev_priv = to_i915(dev);
D
David Weinehall 已提交
1169
	struct pci_dev *pdev = dev_priv->drm.pdev;
1170
	pci_power_t opregion_target_state;
1171

1172
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1173

1174 1175
	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
1176
	intel_power_domains_disable(dev_priv);
1177

1178 1179
	drm_kms_helper_poll_disable(dev);

D
David Weinehall 已提交
1180
	pci_save_state(pdev);
J
Jesse Barnes 已提交
1181

1182
	intel_display_suspend(dev);
1183

1184
	intel_dp_mst_suspend(dev_priv);
1185

1186 1187
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);
1188

1189
	intel_suspend_encoders(dev_priv);
1190

1191
	intel_suspend_hw(dev_priv);
1192

1193
	i915_ggtt_suspend(&dev_priv->ggtt);
1194

1195
	i915_save_state(dev_priv);
1196

1197
	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
1198
	intel_opregion_suspend(dev_priv, opregion_target_state);
1199

1200
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
1201

1202 1203
	dev_priv->suspend_count++;

1204
	intel_csr_ucode_suspend(dev_priv);
1205

1206
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1207

1208
	return 0;
1209 1210
}

1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

1223
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
1224
{
1225
	struct drm_i915_private *dev_priv = to_i915(dev);
D
David Weinehall 已提交
1226
	struct pci_dev *pdev = dev_priv->drm.pdev;
1227
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1228
	int ret;
1229

1230
	disable_rpm_wakeref_asserts(rpm);
1231

1232 1233
	i915_gem_suspend_late(dev_priv);

1234
	intel_uncore_suspend(&dev_priv->uncore);
1235

1236 1237
	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));
1238

1239 1240
	intel_display_power_suspend_late(dev_priv);

1241
	ret = vlv_suspend_complete(dev_priv);
1242
	if (ret) {
1243
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
1244
		intel_power_domains_resume(dev_priv);
1245

1246
		goto out;
1247 1248
	}

D
David Weinehall 已提交
1249
	pci_disable_device(pdev);
1250
	/*
1251
	 * During hibernation on some platforms the BIOS may try to access
1252 1253
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
1254 1255 1256 1257 1258 1259 1260
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is inpractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
1261
	 */
1262
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
D
David Weinehall 已提交
1263
		pci_set_power_state(pdev, PCI_D3hot);
1264

1265
out:
1266
	enable_rpm_wakeref_asserts(rpm);
1267
	if (!dev_priv->uncore.user_forcewake_count)
1268
		intel_runtime_pm_driver_release(rpm);
1269 1270

	return ret;
1271 1272
}

1273
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
1274 1275 1276
{
	int error;

1277 1278
	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
1279
		return -EINVAL;
1280

1281
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1282
		return 0;
1283

1284
	error = i915_drm_suspend(&i915->drm);
1285 1286 1287
	if (error)
		return error;

1288
	return i915_drm_suspend_late(&i915->drm, false);
J
Jesse Barnes 已提交
1289 1290
}

1291
static int i915_drm_resume(struct drm_device *dev)
1292
{
1293
	struct drm_i915_private *dev_priv = to_i915(dev);
1294
	int ret;
1295

1296
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1297

1298 1299
	sanitize_gpu(dev_priv);

1300
	ret = i915_ggtt_enable_hw(dev_priv);
1301
	if (ret)
1302
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
1303

1304
	i915_ggtt_resume(&dev_priv->ggtt);
1305

1306 1307
	intel_csr_ucode_resume(dev_priv);

1308
	i915_restore_state(dev_priv);
1309
	intel_pps_unlock_regs_wa(dev_priv);
1310

1311
	intel_init_pch_refclk(dev_priv);
1312

1313 1314 1315 1316 1317
	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
1318 1319
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
1320 1321 1322 1323 1324
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

1325 1326
	drm_mode_config_reset(dev);

1327
	i915_gem_resume(dev_priv);
1328

1329
	intel_modeset_init_hw(dev_priv);
1330
	intel_init_clock_gating(dev_priv);
1331

1332 1333
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
1334
		dev_priv->display.hpd_irq_setup(dev_priv);
1335
	spin_unlock_irq(&dev_priv->irq_lock);
1336

1337
	intel_dp_mst_resume(dev_priv);
1338

1339 1340
	intel_display_resume(dev);

1341 1342
	drm_kms_helper_poll_enable(dev);

1343 1344 1345
	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
1346
	 * bother with the tiny race here where we might lose hotplug
1347 1348 1349
	 * notifications.
	 * */
	intel_hpd_init(dev_priv);
1350

1351
	intel_opregion_resume(dev_priv);
1352

1353
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
1354

1355 1356
	intel_power_domains_enable(dev_priv);

1357
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1358

1359
	return 0;
1360 1361
}

1362
static int i915_drm_resume_early(struct drm_device *dev)
1363
{
1364
	struct drm_i915_private *dev_priv = to_i915(dev);
D
David Weinehall 已提交
1365
	struct pci_dev *pdev = dev_priv->drm.pdev;
1366
	int ret;
1367

1368 1369 1370 1371 1372 1373 1374 1375 1376
	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
D
David Weinehall 已提交
1388
	ret = pci_set_power_state(pdev, PCI_D0);
1389
	if (ret) {
1390 1391
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
1392
		return ret;
1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
1408 1409
	if (pci_enable_device(pdev))
		return -EIO;
1410

D
David Weinehall 已提交
1411
	pci_set_master(pdev);
1412

1413
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1414

1415
	ret = vlv_resume_prepare(dev_priv, false);
1416
	if (ret)
1417
		drm_err(&dev_priv->drm,
1418
			"Resume prepare failed: %d, continuing anyway\n", ret);
1419

1420 1421
	intel_uncore_resume_early(&dev_priv->uncore);

1422
	intel_gt_check_and_clear_faults(&dev_priv->gt);
1423

1424
	intel_display_power_resume_early(dev_priv);
1425

1426
	intel_power_domains_resume(dev_priv);
1427

1428
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1429

1430
	return ret;
1431 1432
}

1433
int i915_resume_switcheroo(struct drm_i915_private *i915)
1434
{
1435
	int ret;
1436

1437
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1438 1439
		return 0;

1440
	ret = i915_drm_resume_early(&i915->drm);
1441 1442 1443
	if (ret)
		return ret;

1444
	return i915_drm_resume(&i915->drm);
1445 1446
}

1447 1448
static int i915_pm_prepare(struct device *kdev)
{
1449
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1450

1451
	if (!i915) {
1452 1453 1454 1455
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

1456
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1457 1458
		return 0;

1459
	return i915_drm_prepare(&i915->drm);
1460 1461
}

1462
static int i915_pm_suspend(struct device *kdev)
1463
{
1464
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1465

1466
	if (!i915) {
1467
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
1468 1469
		return -ENODEV;
	}
1470

1471
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1472 1473
		return 0;

1474
	return i915_drm_suspend(&i915->drm);
1475 1476
}

1477
static int i915_pm_suspend_late(struct device *kdev)
1478
{
1479
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1480 1481

	/*
D
Damien Lespiau 已提交
1482
	 * We have a suspend ordering issue with the snd-hda driver also
1483 1484 1485 1486 1487 1488 1489
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
1490
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1491
		return 0;
1492

1493
	return i915_drm_suspend_late(&i915->drm, false);
1494 1495
}

1496
static int i915_pm_poweroff_late(struct device *kdev)
1497
{
1498
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1499

1500
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1501 1502
		return 0;

1503
	return i915_drm_suspend_late(&i915->drm, true);
1504 1505
}

1506
static int i915_pm_resume_early(struct device *kdev)
1507
{
1508
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1509

1510
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1511 1512
		return 0;

1513
	return i915_drm_resume_early(&i915->drm);
1514 1515
}

1516
static int i915_pm_resume(struct device *kdev)
1517
{
1518
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1519

1520
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1521 1522
		return 0;

1523
	return i915_drm_resume(&i915->drm);
1524 1525
}

1526
/* freeze: before creating the hibernation_image */
1527
static int i915_pm_freeze(struct device *kdev)
1528
{
1529
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1530 1531
	int ret;

1532 1533
	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
1534 1535 1536
		if (ret)
			return ret;
	}
1537

1538
	ret = i915_gem_freeze(i915);
1539 1540 1541 1542
	if (ret)
		return ret;

	return 0;
1543 1544
}

1545
static int i915_pm_freeze_late(struct device *kdev)
1546
{
1547
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1548 1549
	int ret;

1550 1551
	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
1552 1553 1554
		if (ret)
			return ret;
	}
1555

1556
	ret = i915_gem_freeze_late(i915);
1557 1558 1559 1560
	if (ret)
		return ret;

	return 0;
1561 1562 1563
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

1585
static int intel_runtime_suspend(struct device *kdev)
1586
{
1587
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1588
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1589
	int ret;
1590

1591
	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
1592 1593
		return -ENODEV;

1594
	drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
1595

1596
	disable_rpm_wakeref_asserts(rpm);
1597

1598 1599 1600 1601
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
1602
	i915_gem_runtime_suspend(dev_priv);
1603

1604
	intel_gt_runtime_suspend(&dev_priv->gt);
1605

1606
	intel_runtime_pm_disable_interrupts(dev_priv);
1607

1608
	intel_uncore_suspend(&dev_priv->uncore);
1609

1610 1611
	intel_display_power_suspend(dev_priv);

1612
	ret = vlv_suspend_complete(dev_priv);
1613
	if (ret) {
1614 1615
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
1616
		intel_uncore_runtime_resume(&dev_priv->uncore);
1617

1618
		intel_runtime_pm_enable_interrupts(dev_priv);
1619

1620
		intel_gt_runtime_resume(&dev_priv->gt);
1621

1622
		enable_rpm_wakeref_asserts(rpm);
1623

1624 1625
		return ret;
	}
1626

1627
	enable_rpm_wakeref_asserts(rpm);
1628
	intel_runtime_pm_driver_release(rpm);
1629

1630
	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
1631 1632
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");
1633

1634
	rpm->suspended = true;
1635 1636

	/*
1637 1638
	 * FIXME: We really should find a document that references the arguments
	 * used below!
1639
	 */
1640
	if (IS_BROADWELL(dev_priv)) {
1641 1642 1643 1644 1645 1646
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
1647
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
1648
	} else {
1649 1650 1651 1652 1653 1654 1655
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
1656
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
1657
	}
1658

1659
	assert_forcewakes_inactive(&dev_priv->uncore);
1660

1661
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1662 1663
		intel_hpd_poll_init(dev_priv);

1664
	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
1665 1666 1667
	return 0;
}

1668
static int intel_runtime_resume(struct device *kdev)
1669
{
1670
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1671
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1672
	int ret;
1673

1674
	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
1675
		return -ENODEV;
1676

1677
	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
1678

1679
	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
1680
	disable_rpm_wakeref_asserts(rpm);
1681

1682
	intel_opregion_notify_adapter(dev_priv, PCI_D0);
1683
	rpm->suspended = false;
1684
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
1685 1686
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");
1687

1688 1689
	intel_display_power_resume(dev_priv);

1690
	ret = vlv_resume_prepare(dev_priv, true);
1691

1692
	intel_uncore_runtime_resume(&dev_priv->uncore);
1693

1694 1695
	intel_runtime_pm_enable_interrupts(dev_priv);

1696 1697 1698 1699
	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
1700
	intel_gt_runtime_resume(&dev_priv->gt);
1701

1702 1703 1704 1705 1706
	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
1707
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1708 1709
		intel_hpd_init(dev_priv);

1710 1711
	intel_enable_ipc(dev_priv);

1712
	enable_rpm_wakeref_asserts(rpm);
1713

1714
	if (ret)
1715 1716
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
1717
	else
1718
		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
1719 1720

	return ret;
1721 1722
}

1723
const struct dev_pm_ops i915_pm_ops = {
1724 1725 1726 1727
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
1728
	.prepare = i915_pm_prepare,
1729
	.suspend = i915_pm_suspend,
1730 1731
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
1732
	.resume = i915_pm_resume,
1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
1749 1750 1751 1752
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
1753
	.poweroff = i915_pm_suspend,
1754
	.poweroff_late = i915_pm_poweroff_late,
1755 1756
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,
1757 1758

	/* S0ix (via runtime suspend) event handlers */
1759 1760
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
1761 1762
};

1763 1764 1765
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
1766
	.release = drm_release_noglobal,
1767
	.unlocked_ioctl = drm_ioctl,
1768
	.mmap = i915_gem_mmap,
1769 1770
	.poll = drm_poll,
	.read = drm_read,
1771
	.compat_ioctl = i915_ioc32_compat_ioctl,
1772 1773 1774
	.llseek = noop_llseek,
};

1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
1789
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1801
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
1802
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
1803 1804
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1805
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
1806 1807
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
1808
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
1809 1810 1811 1812 1813 1814
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
1815
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
1816 1817
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
1818 1819
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
1820
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1821
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
1822
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
D
Daniel Vetter 已提交
1823 1824 1825 1826
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
1827
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
1828
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1829 1830 1831 1832 1833 1834
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
1835
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
1836 1837 1838
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
1839 1840
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
1841 1842
};

L
Linus Torvalds 已提交
1843
static struct drm_driver driver = {
1844 1845
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
D
Dave Airlie 已提交
1846
	 */
1847
	.driver_features =
1848
	    DRIVER_GEM |
1849 1850
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
1851
	.release = i915_driver_release,
1852
	.open = i915_driver_open,
1853
	.lastclose = i915_driver_lastclose,
1854
	.postclose = i915_driver_postclose,
1855

1856
	.gem_close_object = i915_gem_close_object,
C
Chris Wilson 已提交
1857
	.gem_free_object_unlocked = i915_gem_free_object,
1858 1859 1860 1861 1862 1863

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

1864
	.dumb_create = i915_gem_dumb_create,
1865 1866
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

L
Linus Torvalds 已提交
1867
	.ioctls = i915_ioctls,
1868
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
1869
	.fops = &i915_driver_fops,
1870 1871 1872 1873 1874 1875
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
L
Linus Torvalds 已提交
1876
};