i915_drv.c 75.5 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
D
Dave Airlie 已提交
3
/*
4
 *
L
Linus Torvalds 已提交
5 6
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
D
Dave Airlie 已提交
28
 */
L
Linus Torvalds 已提交
29

30
#include <linux/acpi.h>
31 32
#include <linux/device.h>
#include <linux/oom.h>
33
#include <linux/module.h>
34 35
#include <linux/pci.h>
#include <linux/pm.h>
36
#include <linux/pm_runtime.h>
37 38
#include <linux/pnp.h>
#include <linux/slab.h>
39
#include <linux/vga_switcheroo.h>
40 41 42
#include <linux/vt.h>
#include <acpi/video.h>

43
#include <drm/drm_atomic_helper.h>
44 45 46
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
47 48
#include <drm/i915_drm.h>

49 50 51 52
#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
53
#include "display/intel_display_types.h"
54
#include "display/intel_dp.h"
55 56 57 58 59
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
60
#include "display/intel_vga.h"
61

62
#include "gem/i915_gem_context.h"
63
#include "gem/i915_gem_ioctls.h"
64
#include "gem/i915_gem_mman.h"
65
#include "gt/intel_gt.h"
66
#include "gt/intel_gt_pm.h"
67
#include "gt/intel_rc6.h"
68

69
#include "i915_debugfs.h"
70
#include "i915_drv.h"
71
#include "i915_irq.h"
72
#include "i915_memcpy.h"
73
#include "i915_perf.h"
L
Lionel Landwerlin 已提交
74
#include "i915_query.h"
75
#include "i915_suspend.h"
76
#include "i915_switcheroo.h"
77
#include "i915_sysfs.h"
78
#include "i915_trace.h"
79
#include "i915_vgpu.h"
80
#include "intel_csr.h"
81
#include "intel_memory_region.h"
82
#include "intel_pm.h"
J
Jesse Barnes 已提交
83

84 85
static struct drm_driver driver;

86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147
struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

148
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
149
{
150 151 152 153
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
154
	if (!dev_priv->bridge_dev) {
155
		drm_err(&dev_priv->drm, "bridge device not found\n");
156 157 158 159 160 161 162
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
163
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
164
{
165
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
166 167 168 169
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

170
	if (INTEL_GEN(dev_priv) >= 4)
171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
192
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
193 194 195 196
		dev_priv->mch_res.start = 0;
		return ret;
	}

197
	if (INTEL_GEN(dev_priv) >= 4)
198 199 200 201 202 203 204 205 206 207
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
208
intel_setup_mchbar(struct drm_i915_private *dev_priv)
209
{
210
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
211 212 213
	u32 temp;
	bool enabled;

214
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
215 216 217 218
		return;

	dev_priv->mchbar_need_disable = false;

219
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
220 221 222 223 224 225 226 227 228 229 230
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

231
	if (intel_alloc_mchbar_resource(dev_priv))
232 233 234 235 236
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
237
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
238 239 240 241 242 243 244 245 246
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
247
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
248
{
249
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
250 251

	if (dev_priv->mchbar_need_disable) {
252
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

275
static int i915_driver_modeset_probe(struct drm_i915_private *i915)
276 277 278
{
	int ret;

279
	if (i915_inject_probe_failure(i915))
280 281
		return -ENODEV;

282 283 284
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
285 286 287 288
		if (ret)
			goto out;
	}

289
	intel_bios_init(i915);
290

291 292
	ret = intel_vga_register(i915);
	if (ret)
293 294 295 296
		goto out;

	intel_register_dsm_handler();

297
	ret = i915_switcheroo_register(i915);
298 299 300
	if (ret)
		goto cleanup_vga_client;

301
	intel_power_domains_init_hw(i915, false);
302

303
	intel_csr_ucode_init(i915);
304

305
	ret = intel_irq_install(i915);
306 307 308 309 310
	if (ret)
		goto cleanup_csr;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
311
	ret = intel_modeset_init(i915);
312 313
	if (ret)
		goto cleanup_irq;
314

315
	ret = i915_gem_init(i915);
316
	if (ret)
317
		goto cleanup_modeset;
318

319
	intel_overlay_setup(i915);
320

321
	if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
322 323
		return 0;

324
	ret = intel_fbdev_init(&i915->drm);
325 326 327 328
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
329
	intel_hpd_init(i915);
330

331
	intel_init_ipc(i915);
332

333 334 335
	return 0;

cleanup_gem:
336 337 338
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
339
cleanup_modeset:
340
	intel_modeset_driver_remove(i915);
341
cleanup_irq:
342
	intel_irq_uninstall(i915);
343
cleanup_csr:
344 345
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
346
	i915_switcheroo_unregister(i915);
347
cleanup_vga_client:
348
	intel_vga_unregister(i915);
349 350 351 352
out:
	return ret;
}

353 354
static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
355
	intel_modeset_driver_remove(i915);
356

357 358
	intel_irq_uninstall(i915);

359 360
	intel_bios_driver_remove(i915);

361 362
	i915_switcheroo_unregister(i915);

363
	intel_vga_unregister(i915);
364 365 366 367

	intel_csr_ucode_fini(i915);
}

368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
388
	 * by the GPU. i915_retire_requests() is called directly when we
389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
412
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
413 414 415 416 417 418 419 420 421 422

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

423 424 425 426
/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
427 428 429 430 431
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
432 433 434
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
435 436 437 438
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
439
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
440
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
441

442
	if (pre) {
443
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
444
			  "It may not be fully functional.\n");
445 446
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
447 448
}

449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471
static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
{
	if (!IS_VALLEYVIEW(i915))
		return 0;

	/* we write all the values in the struct, so no need to zero it out */
	i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
				       GFP_KERNEL);
	if (!i915->vlv_s0ix_state)
		return -ENOMEM;

	return 0;
}

static void vlv_free_s0ix_state(struct drm_i915_private *i915)
{
	if (!i915->vlv_s0ix_state)
		return;

	kfree(i915->vlv_s0ix_state);
	i915->vlv_s0ix_state = NULL;
}

472 473 474 475 476 477
static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(&i915->gt, ALL_ENGINES);
}

478
/**
479
 * i915_driver_early_probe - setup state not requiring device access
480 481 482 483 484 485 486 487
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
488
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
489 490 491
{
	int ret = 0;

492
	if (i915_inject_probe_failure(dev_priv))
493 494
		return -ENODEV;

495 496
	intel_device_info_subplatform_init(dev_priv);

497
	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
498
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);
499

500 501 502
	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
L
Lyude 已提交
503

504
	mutex_init(&dev_priv->sb_lock);
505 506 507
	pm_qos_add_request(&dev_priv->sb_qos,
			   PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

508 509 510
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
511
	mutex_init(&dev_priv->hdcp_comp_mutex);
512

513
	i915_memcpy_init_early(dev_priv);
514
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);
515

516 517
	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
518
		return ret;
519

520 521 522 523
	ret = vlv_alloc_s0ix_state(dev_priv);
	if (ret < 0)
		goto err_workqueues;

524 525
	intel_wopcm_init_early(&dev_priv->wopcm);

526
	intel_gt_init_early(&dev_priv->gt, dev_priv);
527

528
	i915_gem_init_early(dev_priv);
529

530
	/* This must be called before any calls to HAS_PCH_* */
531
	intel_detect_pch(dev_priv);
532

533
	intel_pm_setup(dev_priv);
534
	intel_init_dpio(dev_priv);
535 536
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
537
		goto err_gem;
538 539 540 541
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
542
	intel_display_crc_init(dev_priv);
543

544
	intel_detect_preproduction_hw(dev_priv);
545 546 547

	return 0;

548
err_gem:
549
	i915_gem_cleanup_early(dev_priv);
550
	intel_gt_driver_late_release(&dev_priv->gt);
551 552
	vlv_free_s0ix_state(dev_priv);
err_workqueues:
553 554 555 556 557
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
558
 * i915_driver_late_release - cleanup the setup done in
559
 *			       i915_driver_early_probe()
560 561
 * @dev_priv: device private
 */
562
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
563
{
564
	intel_irq_fini(dev_priv);
565
	intel_power_domains_cleanup(dev_priv);
566
	i915_gem_cleanup_early(dev_priv);
567
	intel_gt_driver_late_release(&dev_priv->gt);
568
	vlv_free_s0ix_state(dev_priv);
569
	i915_workqueues_cleanup(dev_priv);
570 571 572

	pm_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);
573 574 575
}

/**
576
 * i915_driver_mmio_probe - setup device MMIO
577 578 579 580 581 582 583
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
584
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
585 586 587
{
	int ret;

588
	if (i915_inject_probe_failure(dev_priv))
589 590
		return -ENODEV;

591
	if (i915_get_bridge_dev(dev_priv))
592 593
		return -EIO;

594
	ret = intel_uncore_init_mmio(&dev_priv->uncore);
595
	if (ret < 0)
596
		goto err_bridge;
597

598 599
	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);
600

601 602
	intel_device_info_init_mmio(dev_priv);

603
	intel_uncore_prune_mmio_domains(&dev_priv->uncore);
604

605
	intel_uc_init_mmio(&dev_priv->gt.uc);
606

607
	ret = intel_engines_init_mmio(&dev_priv->gt);
608 609 610
	if (ret)
		goto err_uncore;

611 612 613
	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

614 615
	return 0;

616
err_uncore:
617
	intel_teardown_mchbar(dev_priv);
618
	intel_uncore_fini_mmio(&dev_priv->uncore);
619
err_bridge:
620 621 622 623 624 625
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
626
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
627 628
 * @dev_priv: device private
 */
629
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
630
{
631
	intel_teardown_mchbar(dev_priv);
632
	intel_uncore_fini_mmio(&dev_priv->uncore);
633 634 635
	pci_dev_put(dev_priv->bridge_dev);
}

636 637
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
638
	intel_gvt_sanitize_options(dev_priv);
639 640
}

V
Ville Syrjälä 已提交
641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660
#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

static const char *intel_dram_type_str(enum intel_dram_type type)
{
	static const char * const str[] = {
		DRAM_TYPE_STR(UNKNOWN),
		DRAM_TYPE_STR(DDR3),
		DRAM_TYPE_STR(DDR4),
		DRAM_TYPE_STR(LPDDR3),
		DRAM_TYPE_STR(LPDDR4),
	};

	if (type >= ARRAY_SIZE(str))
		type = INTEL_DRAM_UNKNOWN;

	return str[type];
}

#undef DRAM_TYPE_STR

661 662 663 664 665
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	return dimm->ranks * 64 / (dimm->width ?: 1);
}

666 667
/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
668
{
669 670 671 672 673 674
	return val & SKL_DRAM_SIZE_MASK;
}

static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
675
		return 0;
676

677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696
	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
697 698
}

699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731
/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
	return (val & CNL_DRAM_SIZE_MASK) / 2;
}

static int cnl_get_dimm_width(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	switch (val & CNL_DRAM_WIDTH_MASK) {
	case CNL_DRAM_WIDTH_X8:
	case CNL_DRAM_WIDTH_X16:
	case CNL_DRAM_WIDTH_X32:
		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int cnl_get_dimm_ranks(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;

	return val + 1;
}

732
static bool
733
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
734
{
735 736
	/* Convert total GB to Gb per DRAM device */
	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
737 738
}

739
static void
740 741
skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
		       struct dram_dimm_info *dimm,
742
		       int channel, char dimm_name, u16 val)
743
{
744 745 746 747 748 749 750 751 752
	if (INTEL_GEN(dev_priv) >= 10) {
		dimm->size = cnl_get_dimm_size(val);
		dimm->width = cnl_get_dimm_width(val);
		dimm->ranks = cnl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}
753

754 755 756 757
	drm_dbg_kms(&dev_priv->drm,
		    "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		    channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		    yesno(skl_is_16gb_dimm(dimm)));
758
}
759

760
static int
761 762
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
			  struct dram_channel_info *ch,
763 764
			  int channel, u32 val)
{
765 766 767 768
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
			       channel, 'S', val >> 16);
769

770
	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
771
		drm_dbg_kms(&dev_priv->drm, "CH%u not populated\n", channel);
772
		return -EINVAL;
773
	}
774

775
	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
776
		ch->ranks = 2;
777
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
778
		ch->ranks = 2;
779
	else
780
		ch->ranks = 1;
781

782
	ch->is_16gb_dimm =
783 784
		skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);
785

786 787
	drm_dbg_kms(&dev_priv->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
		    channel, ch->ranks, yesno(ch->is_16gb_dimm));
788 789 790 791

	return 0;
}

792
static bool
793 794
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
795
{
796
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
797 798
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
799 800
}

801 802 803 804
static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
805
	struct dram_channel_info ch0 = {}, ch1 = {};
806
	u32 val;
807 808
	int ret;

809
	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
810
	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
811 812 813
	if (ret == 0)
		dram_info->num_channels++;

814
	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
815
	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
816 817 818 819
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
820 821
		drm_info(&dev_priv->drm,
			 "Number of memory channels is zero\n");
822 823 824 825 826 827 828 829
		return -EINVAL;
	}

	/*
	 * If any of the channel is single rank channel, worst case output
	 * will be same as if single rank memory, so consider single rank
	 * memory.
	 */
830 831
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
832
	else
833
		dram_info->ranks = max(ch0.ranks, ch1.ranks);
834

835
	if (dram_info->ranks == 0) {
836 837
		drm_info(&dev_priv->drm,
			 "couldn't get memory rank information\n");
838 839
		return -EINVAL;
	}
840

841
	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
842

843
	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
844

845 846
	drm_dbg_kms(&dev_priv->drm, "Memory configuration is symmetric? %s\n",
		    yesno(dram_info->symmetric_memory));
847 848 849
	return 0;
}

V
Ville Syrjälä 已提交
850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871
static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

872 873 874 875 876 877 878
static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 mem_freq_khz, val;
	int ret;

V
Ville Syrjälä 已提交
879
	dram_info->type = skl_get_dram_type(dev_priv);
880 881
	drm_dbg_kms(&dev_priv->drm, "DRAM type: %s\n",
		    intel_dram_type_str(dram_info->type));
V
Ville Syrjälä 已提交
882

883 884 885 886 887 888 889 890 891 892 893 894
	ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return ret;

	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_info->bandwidth_kbps = dram_info->num_channels *
							mem_freq_khz * 8;

	if (dram_info->bandwidth_kbps == 0) {
895 896
		drm_info(&dev_priv->drm,
			 "Couldn't get system memory bandwidth\n");
897 898 899 900 901 902 903
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

904 905 906 907
/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
908
	case BXT_DRAM_SIZE_4GBIT:
909
		return 4;
910
	case BXT_DRAM_SIZE_6GBIT:
911
		return 6;
912
	case BXT_DRAM_SIZE_8GBIT:
913
		return 8;
914
	case BXT_DRAM_SIZE_12GBIT:
915
		return 12;
916
	case BXT_DRAM_SIZE_16GBIT:
917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int bxt_get_dimm_width(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;

	return 8 << val;
}

static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

V
Ville Syrjälä 已提交
950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969
static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

970 971 972 973 974
static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
			      u32 val)
{
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);
975 976 977 978 979 980

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * GB to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
981 982
}

983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002
static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents 4-byte channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);

	if (dram_info->bandwidth_kbps == 0) {
1003 1004
		drm_info(&dev_priv->drm,
			 "Couldn't get system memory bandwidth\n");
1005 1006 1007 1008 1009 1010 1011
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each dimms.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
1012
		struct dram_dimm_info dimm;
V
Ville Syrjälä 已提交
1013
		enum intel_dram_type type;
1014 1015 1016 1017 1018 1019

		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;
1020 1021

		bxt_get_dimm_info(&dimm, val);
V
Ville Syrjälä 已提交
1022 1023
		type = bxt_get_dimm_type(val);

1024 1025 1026
		drm_WARN_ON(&dev_priv->drm, type != INTEL_DRAM_UNKNOWN &&
			    dram_info->type != INTEL_DRAM_UNKNOWN &&
			    dram_info->type != type);
1027

1028 1029 1030 1031 1032
		drm_dbg_kms(&dev_priv->drm,
			    "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
			    i - BXT_D_CR_DRP0_DUNIT_START,
			    dimm.size, dimm.width, dimm.ranks,
			    intel_dram_type_str(type));
1033 1034 1035 1036 1037 1038

		/*
		 * If any of the channel is single rank channel,
		 * worst case output will be same as if single rank
		 * memory, so consider single rank memory.
		 */
1039
		if (dram_info->ranks == 0)
1040 1041
			dram_info->ranks = dimm.ranks;
		else if (dimm.ranks == 1)
1042
			dram_info->ranks = 1;
V
Ville Syrjälä 已提交
1043 1044 1045

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
1046 1047
	}

V
Ville Syrjälä 已提交
1048 1049
	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
	    dram_info->ranks == 0) {
1050
		drm_info(&dev_priv->drm, "couldn't get memory information\n");
1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	int ret;

1064 1065 1066 1067 1068 1069 1070
	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

1071
	if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
1072 1073
		return;

1074
	if (IS_GEN9_LP(dev_priv))
1075 1076
		ret = bxt_get_dram_info(dev_priv);
	else
1077
		ret = skl_get_dram_info(dev_priv);
1078 1079 1080
	if (ret)
		return;

1081 1082 1083
	drm_dbg_kms(&dev_priv->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
		    dram_info->bandwidth_kbps,
		    dram_info->num_channels);
1084

1085 1086
	drm_dbg_kms(&dev_priv->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
		    dram_info->ranks, yesno(dram_info->is_16gb_dimm));
1087 1088
}

1089 1090
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
1091 1092
	static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const u8 sets[4] = { 1, 1, 2, 2 };
1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)];
}

static void edram_detect(struct drm_i915_private *dev_priv)
{
	u32 edram_cap = 0;

	if (!(IS_HASWELL(dev_priv) ||
	      IS_BROADWELL(dev_priv) ||
	      INTEL_GEN(dev_priv) >= 9))
		return;

	edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);

	/* NB: We can't write IDICR yet because we don't have gt funcs set up */

	if (!(edram_cap & EDRAM_ENABLED))
		return;

	/*
	 * The needed capability bits for size calculation are not there with
	 * pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		dev_priv->edram_size_mb = 128;
	else
		dev_priv->edram_size_mb =
			gen9_edram_size_mb(dev_priv, edram_cap);

1125 1126
	dev_info(dev_priv->drm.dev,
		 "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
1127 1128
}

1129
/**
1130
 * i915_driver_hw_probe - setup state requiring device access
1131 1132 1133 1134 1135
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
1136
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
1137
{
D
David Weinehall 已提交
1138
	struct pci_dev *pdev = dev_priv->drm.pdev;
1139 1140
	int ret;

1141
	if (i915_inject_probe_failure(dev_priv))
1142 1143
		return -ENODEV;

1144
	intel_device_info_runtime_init(dev_priv);
1145

1146 1147
	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
1148
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
1149 1150 1151 1152 1153 1154
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168
	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

1169
	intel_sanitize_options(dev_priv);
1170

1171 1172 1173
	/* needs to be done before ggtt probe */
	edram_detect(dev_priv);

1174 1175
	i915_perf_init(dev_priv);

1176
	ret = i915_ggtt_probe_hw(dev_priv);
1177
	if (ret)
1178
		goto err_perf;
1179

1180 1181
	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
	if (ret)
1182
		goto err_ggtt;
1183

1184
	ret = i915_ggtt_init_hw(dev_priv);
1185
	if (ret)
1186
		goto err_ggtt;
1187

1188 1189 1190 1191
	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

1192
	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
1193

1194
	ret = i915_ggtt_enable_hw(dev_priv);
1195
	if (ret) {
1196
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
1197
		goto err_mem_regions;
1198 1199
	}

D
David Weinehall 已提交
1200
	pci_set_master(pdev);
1201

1202 1203 1204 1205 1206 1207
	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

1208
	/* overlay on gen2 is broken and can't address above 1G */
1209
	if (IS_GEN(dev_priv, 2)) {
D
David Weinehall 已提交
1210
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
1211
		if (ret) {
1212
			drm_err(&dev_priv->drm, "failed to set DMA mask\n");
1213

1214
			goto err_mem_regions;
1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
1226
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
D
David Weinehall 已提交
1227
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1228 1229

		if (ret) {
1230
			drm_err(&dev_priv->drm, "failed to set DMA mask\n");
1231

1232
			goto err_mem_regions;
1233 1234 1235 1236 1237 1238
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

1239
	intel_gt_init_workarounds(dev_priv);
1240 1241 1242 1243 1244 1245 1246 1247 1248

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
1249 1250 1251 1252
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
1253 1254 1255 1256 1257 1258
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
1259
	 */
1260
	if (INTEL_GEN(dev_priv) >= 5) {
D
David Weinehall 已提交
1261
		if (pci_enable_msi(pdev) < 0)
1262
			drm_dbg(&dev_priv->drm, "can't enable MSI");
1263 1264
	}

1265 1266
	ret = intel_gvt_init(dev_priv);
	if (ret)
1267 1268 1269
		goto err_msi;

	intel_opregion_setup(dev_priv);
1270 1271 1272 1273 1274 1275
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_get_dram_info(dev_priv);

1276
	intel_bw_init_hw(dev_priv);
1277

1278 1279
	return 0;

1280 1281 1282 1283
err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	pm_qos_remove_request(&dev_priv->pm_qos);
1284 1285
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
1286
err_ggtt:
1287
	i915_ggtt_driver_release(dev_priv);
1288 1289
err_perf:
	i915_perf_fini(dev_priv);
1290 1291 1292 1293
	return ret;
}

/**
1294
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
1295 1296
 * @dev_priv: device private
 */
1297
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
1298
{
D
David Weinehall 已提交
1299
	struct pci_dev *pdev = dev_priv->drm.pdev;
1300

1301 1302
	i915_perf_fini(dev_priv);

D
David Weinehall 已提交
1303 1304
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317

	pm_qos_remove_request(&dev_priv->pm_qos);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
1318
	struct drm_device *dev = &dev_priv->drm;
1319

1320
	i915_gem_driver_register(dev_priv);
1321
	i915_pmu_register(dev_priv);
1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
D
David Weinehall 已提交
1333
		i915_setup_sysfs(dev_priv);
1334 1335 1336

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
1337
	} else
1338 1339
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");
1340

1341
	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
1342 1343 1344 1345 1346
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

1347
	intel_gt_driver_register(&dev_priv->gt);
1348

1349
	intel_audio_init(dev_priv);
1350 1351 1352 1353 1354 1355 1356 1357 1358

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
1359 1360 1361 1362 1363

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
1364
	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
1365
		drm_kms_helper_poll_init(dev);
1366

1367
	intel_power_domains_enable(dev_priv);
1368
	intel_runtime_pm_enable(&dev_priv->runtime_pm);
1369 1370 1371 1372 1373 1374 1375 1376
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_regiser()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
1377
	intel_runtime_pm_disable(&dev_priv->runtime_pm);
1378
	intel_power_domains_disable(dev_priv);
1379

1380
	intel_fbdev_unregister(dev_priv);
1381
	intel_audio_deinit(dev_priv);
1382

1383 1384 1385 1386 1387 1388 1389
	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

1390
	intel_gt_driver_unregister(&dev_priv->gt);
1391 1392 1393
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

1394
	i915_perf_unregister(dev_priv);
1395
	i915_pmu_unregister(dev_priv);
1396

D
David Weinehall 已提交
1397
	i915_teardown_sysfs(dev_priv);
1398
	drm_dev_unplug(&dev_priv->drm);
1399

1400
	i915_gem_driver_unregister(dev_priv);
1401 1402
}

1403 1404
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
1405
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
1406 1407
		struct drm_printer p = drm_debug_printer("i915 device info:");

1408
		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
1409 1410 1411
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
1412 1413
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
1414 1415
			   INTEL_GEN(dev_priv));

1416 1417
		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
1418 1419 1420
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1421
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
1422
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1423
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
1424
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
1425 1426
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
1427 1428
}

1429 1430 1431 1432 1433 1434 1435
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;
1436
	int err;
1437 1438 1439

	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
	if (!i915)
1440
		return ERR_PTR(-ENOMEM);
1441

1442 1443
	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
	if (err) {
1444
		kfree(i915);
1445
		return ERR_PTR(err);
1446 1447 1448
	}

	i915->drm.dev_private = i915;
1449 1450 1451

	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);
1452 1453 1454 1455

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
1456
	RUNTIME_INFO(i915)->device_id = pdev->device;
1457

1458
	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
1459 1460 1461 1462

	return i915;
}

1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473
static void i915_driver_destroy(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	drm_dev_fini(&i915->drm);
	kfree(i915);

	/* And make sure we never chase our dangling pointer from pci_dev */
	pci_set_drvdata(pdev, NULL);
}

1474
/**
1475
 * i915_driver_probe - setup chip and create an initial config
1476 1477
 * @pdev: PCI device
 * @ent: matching PCI ID entry
1478
 *
1479
 * The driver probe routine has to do several things:
1480 1481 1482 1483 1484
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
1485
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1486
{
1487 1488
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
1489 1490
	struct drm_i915_private *dev_priv;
	int ret;
1491

1492
	dev_priv = i915_driver_create(pdev, ent);
1493 1494
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);
1495

1496 1497 1498 1499
	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;

1500 1501 1502 1503
	/*
	 * Check if we support fake LMEM -- for now we only unleash this for
	 * the live selftests(test-and-exit).
	 */
1504
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1505 1506 1507 1508 1509 1510 1511 1512 1513 1514
	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
		if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
		    i915_modparams.fake_lmem_start) {
			mkwrite_device_info(dev_priv)->memory_regions =
				REGION_SMEM | REGION_LMEM | REGION_STOLEN;
			mkwrite_device_info(dev_priv)->is_dgfx = true;
			GEM_BUG_ON(!HAS_LMEM(dev_priv));
			GEM_BUG_ON(!IS_DGFX(dev_priv));
		}
	}
1515
#endif
1516

1517 1518
	ret = pci_enable_device(pdev);
	if (ret)
1519
		goto out_fini;
D
Damien Lespiau 已提交
1520

1521
	ret = i915_driver_early_probe(dev_priv);
1522 1523
	if (ret < 0)
		goto out_pci_disable;
1524

1525
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
L
Linus Torvalds 已提交
1526

1527 1528
	i915_detect_vgpu(dev_priv);

1529
	ret = i915_driver_mmio_probe(dev_priv);
1530 1531
	if (ret < 0)
		goto out_runtime_pm_put;
J
Jesse Barnes 已提交
1532

1533
	ret = i915_driver_hw_probe(dev_priv);
1534 1535
	if (ret < 0)
		goto out_cleanup_mmio;
1536

1537
	ret = i915_driver_modeset_probe(dev_priv);
1538
	if (ret < 0)
1539
		goto out_cleanup_hw;
1540 1541 1542

	i915_driver_register(dev_priv);

1543
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1544

1545 1546
	i915_welcome_messages(dev_priv);

1547 1548 1549
	return 0;

out_cleanup_hw:
1550
	i915_driver_hw_remove(dev_priv);
1551
	intel_memory_regions_driver_release(dev_priv);
1552
	i915_ggtt_driver_release(dev_priv);
1553
out_cleanup_mmio:
1554
	i915_driver_mmio_release(dev_priv);
1555
out_runtime_pm_put:
1556
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1557
	i915_driver_late_release(dev_priv);
1558 1559
out_pci_disable:
	pci_disable_device(pdev);
1560
out_fini:
1561
	i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
1562
	i915_driver_destroy(dev_priv);
1563 1564 1565
	return ret;
}

1566
void i915_driver_remove(struct drm_i915_private *i915)
1567
{
1568
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
1569

1570
	i915_driver_unregister(i915);
1571

1572 1573 1574 1575 1576
	/*
	 * After unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
1577
	intel_gt_set_wedged(&i915->gt);
1578

1579 1580 1581
	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

1582
	i915_gem_suspend(i915);
B
Ben Widawsky 已提交
1583

1584
	drm_atomic_helper_shutdown(&i915->drm);
1585

1586
	intel_gvt_driver_remove(i915);
1587

1588
	i915_driver_modeset_remove(i915);
1589

1590 1591
	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);
1592

1593
	intel_power_domains_driver_remove(i915);
1594

1595
	i915_driver_hw_remove(i915);
1596

1597
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1598 1599 1600 1601 1602
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
1603
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1604

1605
	disable_rpm_wakeref_asserts(rpm);
1606

1607
	i915_gem_driver_release(dev_priv);
1608

1609
	intel_memory_regions_driver_release(dev_priv);
1610
	i915_ggtt_driver_release(dev_priv);
1611

1612
	i915_driver_mmio_release(dev_priv);
1613

1614
	enable_rpm_wakeref_asserts(rpm);
1615
	intel_runtime_pm_driver_release(rpm);
1616

1617
	i915_driver_late_release(dev_priv);
1618
	i915_driver_destroy(dev_priv);
1619 1620
}

1621
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1622
{
1623
	struct drm_i915_private *i915 = to_i915(dev);
1624
	int ret;
1625

1626
	ret = i915_gem_open(i915, file);
1627 1628
	if (ret)
		return ret;
1629

1630 1631
	return 0;
}
1632

1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clea
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}
1650

1651
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1652
{
1653 1654
	struct drm_i915_file_private *file_priv = file->driver_priv;

1655
	i915_gem_context_close(file);
1656 1657
	i915_gem_release(dev, file);

1658
	kfree_rcu(file_priv, rcu);
1659 1660 1661

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
1662 1663
}

1664 1665
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
1666
	struct drm_device *dev = &dev_priv->drm;
1667
	struct intel_encoder *encoder;
1668 1669

	drm_modeset_lock_all(dev);
1670 1671 1672
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
1673 1674 1675
	drm_modeset_unlock_all(dev);
}

1676 1677
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
1678
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
1679

1680 1681 1682 1683 1684 1685 1686 1687
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
1688

1689 1690 1691 1692 1693 1694 1695 1696 1697 1698
static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
1699
	i915_gem_suspend(i915);
1700

1701
	return 0;
1702 1703
}

1704
static int i915_drm_suspend(struct drm_device *dev)
J
Jesse Barnes 已提交
1705
{
1706
	struct drm_i915_private *dev_priv = to_i915(dev);
D
David Weinehall 已提交
1707
	struct pci_dev *pdev = dev_priv->drm.pdev;
1708
	pci_power_t opregion_target_state;
1709

1710
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1711

1712 1713
	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
1714
	intel_power_domains_disable(dev_priv);
1715

1716 1717
	drm_kms_helper_poll_disable(dev);

D
David Weinehall 已提交
1718
	pci_save_state(pdev);
J
Jesse Barnes 已提交
1719

1720
	intel_display_suspend(dev);
1721

1722
	intel_dp_mst_suspend(dev_priv);
1723

1724 1725
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);
1726

1727
	intel_suspend_encoders(dev_priv);
1728

1729
	intel_suspend_hw(dev_priv);
1730

1731
	i915_ggtt_suspend(&dev_priv->ggtt);
1732

1733
	i915_save_state(dev_priv);
1734

1735
	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
1736
	intel_opregion_suspend(dev_priv, opregion_target_state);
1737

1738
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
1739

1740 1741
	dev_priv->suspend_count++;

1742
	intel_csr_ucode_suspend(dev_priv);
1743

1744
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1745

1746
	return 0;
1747 1748
}

1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760
static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

1761
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
1762
{
1763
	struct drm_i915_private *dev_priv = to_i915(dev);
D
David Weinehall 已提交
1764
	struct pci_dev *pdev = dev_priv->drm.pdev;
1765
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1766
	int ret = 0;
1767

1768
	disable_rpm_wakeref_asserts(rpm);
1769

1770 1771
	i915_gem_suspend_late(dev_priv);

1772
	intel_uncore_suspend(&dev_priv->uncore);
1773

1774 1775
	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));
1776

1777 1778 1779
	intel_display_power_suspend_late(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1780
		ret = vlv_suspend_complete(dev_priv);
1781 1782

	if (ret) {
1783
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
1784
		intel_power_domains_resume(dev_priv);
1785

1786
		goto out;
1787 1788
	}

D
David Weinehall 已提交
1789
	pci_disable_device(pdev);
1790
	/*
1791
	 * During hibernation on some platforms the BIOS may try to access
1792 1793
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
1794 1795 1796 1797 1798 1799 1800
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is inpractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
1801
	 */
1802
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
D
David Weinehall 已提交
1803
		pci_set_power_state(pdev, PCI_D3hot);
1804

1805
out:
1806
	enable_rpm_wakeref_asserts(rpm);
1807
	if (!dev_priv->uncore.user_forcewake_count)
1808
		intel_runtime_pm_driver_release(rpm);
1809 1810

	return ret;
1811 1812
}

1813
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
1814 1815 1816
{
	int error;

1817 1818
	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
1819
		return -EINVAL;
1820

1821
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1822
		return 0;
1823

1824
	error = i915_drm_suspend(&i915->drm);
1825 1826 1827
	if (error)
		return error;

1828
	return i915_drm_suspend_late(&i915->drm, false);
J
Jesse Barnes 已提交
1829 1830
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(&dev_priv->ggtt);
	i915_gem_restore_fences(&dev_priv->ggtt);

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		drm_err(&dev_priv->drm,
			"Resume prepare failed: %d, continuing anyway\n",
			ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	intel_gt_check_and_clear_faults(&dev_priv->gt);

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

int i915_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

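/*
 * The i915_pm_* callbacks below are the dev_pm_ops system sleep hooks wired
 * up in i915_pm_ops at the end of this file. They are thin wrappers that
 * bail out early when vga_switcheroo has already powered the device off
 * (DRM_SWITCH_POWER_OFF) and otherwise forward to the i915_drm_* helpers
 * above.
 */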
static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(&i915->drm);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
	int i;

	if (!s)
		return;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->pcbr			= I915_READ(VLV_PCBR);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	if (!s)
		return;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bits; they are not
	 * restored here, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_PCBR,			s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}

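/*
 * Helper shared by vlv_allow_gt_wake() and vlv_wait_for_gt_wells() below to
 * poll the Gunit power well status register with a sleeping wait.
 */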
static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
				  u32 mask, u32 val)
{
	i915_reg_t reg = VLV_GTLC_PW_STATUS;
	u32 reg_value;
	int ret;

	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	ret = wait_for(((reg_value =
			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
		       == val, 3);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	return ret;
}

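/*
 * Force the Gunit GFX clock on, or release the force. The S0ix sequence
 * below keeps the clock forced on across the Gunit register save/restore
 * and always releases it again afterwards.
 */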
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(&dev_priv->uncore,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		drm_err(&dev_priv->drm,
			"timeout waiting for GFX clock force-on (%08x)\n",
			I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 mask;
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	mask = VLV_GTLC_ALLOWWAKEACK;
	val = allow ? mask : 0;

	err = vlv_wait_for_pw_status(dev_priv, mask, val);
	if (err)
		drm_err(&dev_priv->drm, "timeout disabling GT waking\n");

	return err;
}

static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				  bool wait_for_on)
{
	u32 mask;
	u32 val;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 *
	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
	 * reset and we are trying to force the machine to sleep.
	 */
	if (vlv_wait_for_pw_status(dev_priv, mask, val))
		drm_dbg(&dev_priv->drm,
			"timeout waiting for GT wells to go %s\n",
			onoff(wait_for_on));
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	drm_dbg(&dev_priv->drm,
		"GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

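/*
 * vlv_suspend_complete() and vlv_resume_prepare() implement the VLV/CHV
 * specific S0ix entry/exit sequence. They are shared by the system suspend
 * path (i915_drm_suspend_late()/i915_drm_resume_early()) and the runtime PM
 * path (intel_runtime_suspend()/intel_runtime_resume()).
 */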
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well-on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	drm_WARN_ON(&dev_priv->drm,
		    (I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}

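/*
 * Runtime PM entry points, invoked by the PM core through the
 * .runtime_suspend/.runtime_resume hooks in i915_pm_ops below once the
 * device's runtime PM usage count has dropped to zero (and any autosuspend
 * delay has expired).
 */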
static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_gt_runtime_suspend(&dev_priv->gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_display_power_suspend(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(&dev_priv->gt);

		i915_gem_restore_fences(&dev_priv->ggtt);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(&dev_priv->gt);
	i915_gem_restore_fences(&dev_priv->ggtt);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
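
/*
 * i915_pm_ops is not registered here; it is referenced from the PCI driver
 * that binds i915 (see i915_pci.c). A minimal sketch of that hookup, assuming
 * the probe/remove callbacks and id table declared there:
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		.id_table = pciidlist,
 *		.probe = i915_pci_probe,
 *		.remove = i915_pci_remove,
 *		.driver.pm = &i915_pm_ops,
 *	};
 */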

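/*
 * File operations for the DRM device nodes exposed by this driver; hooked up
 * via the .fops pointer in the drm_driver definition at the end of this file.
 */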
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
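
/*
 * The table above is dispatched by drm_ioctl() via the .ioctls/.num_ioctls
 * fields of the drm_driver below. A hedged userspace sketch of one such call
 * (I915_GETPARAM), assuming fd is an open i915 DRM file descriptor:
 *
 *	int devid = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &devid,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */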

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
	.get_scanout_position = i915_get_crtc_scanoutpos,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};