/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
#include "intel_pm.h"

static struct drm_driver driver;

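/*
 * Register snapshot used to preserve state across Valleyview S0ix
 * transitions; allocated only on VLV (see vlv_alloc_s0ix_state() below)
 * and filled in/restored by the VLV suspend/resume paths.
 */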
struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

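/*
 * Cache the host bridge (device 0, function 0 in the GPU's PCI domain):
 * its config space holds MCHBAR and DEVEN, which the MCHBAR setup code
 * below pokes.  The reference is dropped in i915_driver_mmio_release().
 */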
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Set up MCHBAR if possible; remember whether we need to disable it again on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_i915_private *i915);
static int i915_suspend_switcheroo(struct drm_i915_private *i915,
				   pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_i915_private *i915 = pdev_to_i915(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (!i915) {
		dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
		return;
	}

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(i915);
		i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(i915, pmm);
		i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_i915_private *i915 = pdev_to_i915(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return i915 && i915->drm.open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

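/*
 * Display-side probe: vblank support, VBT parsing, VGA arbitration and
 * switcheroo registration, power domains, CSR/DMC firmware, interrupts,
 * GMBUS, the modeset itself, GEM init and finally fbdev and hotplug.
 * The cleanup labels below unwind these steps in reverse order.
 */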
static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			goto out;
	}

	intel_bios_init(i915);

	/* If we have more than one VGA card installed, we need to arbitrate
	 * access to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, i915, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(i915);

	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	ret = intel_irq_install(i915);
	if (ret)
		goto cleanup_csr;

	intel_gmbus_setup(i915);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(i915);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto cleanup_modeset;

	intel_overlay_setup(i915);

	if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
		return 0;

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);

	intel_init_ipc(i915);

	return 0;

cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
cleanup_modeset:
	intel_modeset_driver_remove(i915);
cleanup_irq:
	intel_irq_uninstall(i915);
	intel_gmbus_teardown(i915);
cleanup_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

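/*
 * Remove any generic firmware framebuffer (efifb, vesafb, ...) that
 * overlaps our GGTT aperture before we take over the display hardware.
 */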
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->gmadr.start;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	intel_modeset_driver_remove(i915);

	intel_bios_driver_remove(i915);

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(i915);
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
{
	if (!IS_VALLEYVIEW(i915))
		return 0;

	/* we write all the values in the struct, so no need to zero it out */
	i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
				       GFP_KERNEL);
	if (!i915->vlv_s0ix_state)
		return -ENOMEM;

	return 0;
}

static void vlv_free_s0ix_state(struct drm_i915_private *i915)
{
	if (!i915->vlv_s0ix_state)
		return;

	kfree(i915->vlv_s0ix_state);
	i915->vlv_s0ix_state = NULL;
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);

	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	pm_qos_add_request(&dev_priv->sb_qos,
			   PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_alloc_s0ix_state(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_gt_init_early(&dev_priv->gt, dev_priv);

	ret = i915_gem_init_early(dev_priv);
	if (ret < 0)
		goto err_gt;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
err_gt:
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_free_s0ix_state(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			       i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_free_s0ix_state(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	pm_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune_mmio_domains(&dev_priv->uncore);

	intel_uc_init_mmio(&dev_priv->gt.uc);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_engines_cleanup(dev_priv);
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

static const char *intel_dram_type_str(enum intel_dram_type type)
{
	static const char * const str[] = {
		DRAM_TYPE_STR(UNKNOWN),
		DRAM_TYPE_STR(DDR3),
		DRAM_TYPE_STR(DDR4),
		DRAM_TYPE_STR(LPDDR3),
		DRAM_TYPE_STR(LPDDR4),
	};

	if (type >= ARRAY_SIZE(str))
		type = INTEL_DRAM_UNKNOWN;

	return str[type];
}

#undef DRAM_TYPE_STR

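/*
 * Number of DRAM devices on a DIMM: each rank spans a 64-bit bus, so for
 * example a dual-rank DIMM built from x8 parts has 2 * 64 / 8 = 16 devices.
 */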
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	return dimm->ranks * 64 / (dimm->width ?: 1);
}

/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
760
{
761 762 763 764 765 766
	return val & SKL_DRAM_SIZE_MASK;
}

static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
767
		return 0;
768

769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788
	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
789 790
}

791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823
/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
	return (val & CNL_DRAM_SIZE_MASK) / 2;
}

static int cnl_get_dimm_width(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	switch (val & CNL_DRAM_WIDTH_MASK) {
	case CNL_DRAM_WIDTH_X8:
	case CNL_DRAM_WIDTH_X16:
	case CNL_DRAM_WIDTH_X32:
		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int cnl_get_dimm_ranks(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;

	return val + 1;
}

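/*
 * DIMMs built from 16Gb DRAM devices matter for the level 0 watermark
 * latency workaround (see intel_get_dram_info()); detect them by converting
 * the DIMM's total size in GB back to Gb per device.
 */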
static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total GB to Gb per DRAM device */
	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}

static void
skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
		       struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	if (INTEL_GEN(dev_priv) >= 10) {
		dimm->size = cnl_get_dimm_size(val);
		dimm->width = cnl_get_dimm_width(val);
		dimm->ranks = cnl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}

	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		      yesno(skl_is_16gb_dimm(dimm)));
}

static int
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		DRM_DEBUG_KMS("CH%u not populated\n", channel);
		return -EINVAL;
	}

	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm =
		skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);

	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
		      channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}

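/*
 * Memory is considered symmetric when both channels have identical DIMM
 * layouts and, if a second (S) slot is populated, it matches the first (L)
 * slot as well.
 */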
static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}

static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		DRM_INFO("Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If any of the channel is single rank channel, worst case output
	 * will be same as if single rank memory, so consider single rank
	 * memory.
	 */
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
	else
		dram_info->ranks = max(ch0.ranks, ch1.ranks);

	if (dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
		      yesno(dram_info->symmetric_memory));
	return 0;
}

static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	dram_info->type = skl_get_dram_type(dev_priv);
	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return ret;

	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_info->bandwidth_kbps = dram_info->num_channels *
							mem_freq_khz * 8;

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int bxt_get_dimm_width(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;

	return 8 << val;
}

static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
			      u32 val)
{
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * GB to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
}

static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents 4-byte channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each dimms.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != type);

		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
			      i - BXT_D_CR_DRP0_DUNIT_START,
			      dimm.size, dimm.width, dimm.ranks,
			      intel_dram_type_str(type));

		/*
		 * If any of the channel is single rank channel,
		 * worst case output will be same as if single rank
		 * memory, so consider single rank memory.
		 */
		if (dram_info->ranks == 0)
			dram_info->ranks = dimm.ranks;
		else if (dimm.ranks == 1)
			dram_info->ranks = 1;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
	    dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory information\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	int ret;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

	if (INTEL_GEN(dev_priv) < 9)
		return;

	if (IS_GEN9_LP(dev_priv))
		ret = bxt_get_dram_info(dev_priv);
	else
		ret = skl_get_dram_info(dev_priv);
	if (ret)
		return;

	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
		      dram_info->bandwidth_kbps,
		      dram_info->num_channels);

	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}

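/*
 * Decode HSW_EDRAM_CAP into an eDRAM size in MB: the number of banks
 * multiplied by the way and set counts looked up from the capability bits.
 */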
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)];
}

static void edram_detect(struct drm_i915_private *dev_priv)
{
	u32 edram_cap = 0;

	if (!(IS_HASWELL(dev_priv) ||
	      IS_BROADWELL(dev_priv) ||
	      INTEL_GEN(dev_priv) >= 9))
		return;

	edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);

	/* NB: We can't write IDICR yet because we don't have gt funcs set up */

	if (!(edram_cap & EDRAM_ENABLED))
		return;

	/*
	 * The needed capability bits for size calculation are not there with
	 * pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		dev_priv->edram_size_mb = 128;
	else
		dev_priv->edram_size_mb =
			gen9_edram_size_mb(dev_priv, edram_cap);

	dev_info(dev_priv->drm.dev,
		 "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}

/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	edram_detect(dev_priv);

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	/*
	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over.
	 */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto err_ggtt;
	}

	ret = vga_remove_vgacon(pdev);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto err_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	intel_gt_init_hw_early(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto err_ggtt;
	}

	pci_set_master(pdev);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(dev_priv, 2)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_gt_init_workarounds(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_get_dram_info(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	intel_gt_driver_register(&dev_priv->gt);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

	intel_gt_driver_unregister(&dev_priv->gt);
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   INTEL_GEN(dev_priv));

		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;
	int err;

	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
	if (!i915)
		return ERR_PTR(-ENOMEM);

	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
	if (err) {
		kfree(i915);
		return ERR_PTR(err);
	}

	i915->drm.dev_private = i915;

	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}

static void i915_driver_destroy(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	drm_dev_fini(&i915->drm);
	kfree(i915);

	/* And make sure we never chase our dangling pointer from pci_dev */
	pci_set_drvdata(pdev, NULL);
}

/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = i915_driver_create(pdev, ent);
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(dev_priv);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_detect_vgpu(dev_priv);

	ret = i915_driver_mmio_probe(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_hw_probe(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = i915_driver_modeset_probe(dev_priv);
	if (ret < 0)
		goto out_cleanup_hw;

	i915_driver_register(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_welcome_messages(dev_priv);

	return 0;

out_cleanup_hw:
	i915_driver_hw_remove(dev_priv);
	i915_ggtt_driver_release(dev_priv);
out_cleanup_mmio:
	i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	i915_driver_late_release(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
	i915_driver_destroy(dev_priv);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/*
	 * After unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged(&i915->gt);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	drm_atomic_helper_shutdown(&i915->drm);

	intel_gvt_driver_remove(i915);

	i915_driver_modeset_remove(i915);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&i915->gt.hangcheck.work);
	i915_reset_error_state(i915);

	i915_gem_driver_remove(i915);

	intel_power_domains_driver_remove(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	i915_ggtt_driver_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
	i915_driver_destroy(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

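/*
 * Let every encoder that implements a ->suspend() hook quiesce itself under
 * the modeset locks during system suspend.
 */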
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

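/*
 * True when we are headed for suspend-to-idle (the ACPI target state is
 * shallower than S3) rather than a full suspend-to-mem.
 */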
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_gem_suspend_gtt_mappings(dev_priv);

	i915_save_state(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

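/*
 * Translate the suspend request into the mode used by the power domain
 * code: hibernation, suspend-to-idle or plain suspend-to-mem.
 */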
static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
1867 1868
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
1869 1870 1871 1872 1873 1874 1875
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is inpractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
1876
	 */
1877
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
D
David Weinehall 已提交
1878
		pci_set_power_state(pdev, PCI_D3hot);
1879

1880
out:
1881
	enable_rpm_wakeref_asserts(rpm);
1882
	if (!dev_priv->uncore.user_forcewake_count)
1883
		intel_runtime_pm_driver_release(rpm);
1884 1885

	return ret;
1886 1887
}

static int
i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
	int error;

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

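/*
 * Second phase of system resume, run after i915_drm_resume_early() has
 * brought the PCI device back up: restore the GGTT mappings, saved register
 * state and display configuration, and re-enable interrupts and hotplug
 * handling.
 */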
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	intel_gt_pm_disable(&dev_priv->gt);

	i915_gem_sanitize(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_restore_gtt_mappings(dev_priv);
	i915_gem_restore_fences(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

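/*
 * Early phase of system resume: power the PCI device back up and restore the
 * low level uncore and power domain state before i915_drm_resume() rebuilds
 * the rest.
 */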
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	intel_gt_check_and_clear_faults(&dev_priv->gt);

	intel_display_power_resume_early(dev_priv);

	intel_gt_pm_disable(&dev_priv->gt);

	intel_power_domains_resume(dev_priv);

	intel_gt_sanitize(&dev_priv->gt, true);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static int i915_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

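/*
 * The i915_pm_* callbacks below are thin wrappers wired into i915_pm_ops;
 * they bail out early when vga_switcheroo has powered the device off and
 * otherwise forward to the i915_drm_* helpers above.
 */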
static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(&i915->drm);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
	int i;

	if (!s)
		return;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->pcbr			= I915_READ(VLV_PCBR);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	if (!s)
		return;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit; they are not
	 * restored here, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_PCBR,			s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}

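/*
 * Poll the Gunit power well status register until the bits in @mask read
 * back as @val; used below both for the render/media well transitions and
 * for the allow-wake acknowledgment.
 */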
static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
				  u32 mask, u32 val)
{
	i915_reg_t reg = VLV_GTLC_PW_STATUS;
	u32 reg_value;
	int ret;

	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	ret = wait_for(((reg_value =
			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
		       == val, 3);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	return ret;
}

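/*
 * Force the GFX clock on or off via the survivability register; when forcing
 * it on, wait for the clock status bit to confirm the request took effect.
 */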
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(&dev_priv->uncore,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}

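/*
 * Allow or disallow GT wake requests and wait for the corresponding
 * ALLOWWAKEACK status bit to follow before proceeding.
 */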
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 mask;
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	mask = VLV_GTLC_ALLOWWAKEACK;
	val = allow ? mask : 0;

	err = vlv_wait_for_pw_status(dev_priv, mask, val);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");

	return err;
}

static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				  bool wait_for_on)
{
	u32 mask;
	u32 val;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 *
	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
	 * reset and we are trying to force the machine to sleep.
	 */
	if (vlv_wait_for_pw_status(dev_priv, mask, val))
		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
				 onoff(wait_for_on));
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

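/*
 * S0ix entry sequence for VLV/CHV: wait for the GT power wells to turn off,
 * then save the Gunit state with the GFX clock forced on and GT wake
 * disallowed. On failure the wake and clock overrides are rolled back.
 */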
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

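/*
 * Inverse of vlv_suspend_complete(): restore the Gunit state with the GFX
 * clock forced on, re-allow GT wake and, on runtime resume, reapply clock
 * gating. Errors are reported but the sequence is carried through as far
 * as possible.
 */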
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}

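/*
 * Runtime PM suspend callback: quiesce GEM, GT, interrupts, uncore and
 * display power, then notify the platform firmware of the device power
 * state via the opregion.
 */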
static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_gt_runtime_suspend(&dev_priv->gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_display_power_suspend(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(&dev_priv->gt);

		i915_gem_restore_fences(dev_priv);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

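/*
 * Runtime PM resume callback: the inverse of intel_runtime_suspend(),
 * restoring power domains, uncore state, interrupts and GT state.
 */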
static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point in rolling things back in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(&dev_priv->gt);
	i915_gem_restore_fences(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

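/*
 * Mapping of the kernel PM events to the handlers above. For a full S3
 * cycle the PM core invokes prepare, suspend and suspend_late on the way
 * down and resume_early and resume on the way up; the S4 ordering is
 * spelled out in the comment inside the structure.
 */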
const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
	.get_scanout_position = i915_get_crtc_scanoutpos,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif