i915_drv.c 26.9 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
D
Dave Airlie 已提交
3
/*
4
 *
L
Linus Torvalds 已提交
5 6
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
D
Dave Airlie 已提交
28
 */
L
Linus Torvalds 已提交
29

30
#include <linux/device.h>
31 32
#include <drm/drmP.h>
#include <drm/i915_drm.h>
L
Linus Torvalds 已提交
33
#include "i915_drv.h"
34
#include "i915_trace.h"
35
#include "intel_drv.h"
L
Linus Torvalds 已提交
36

J
Jesse Barnes 已提交
37
#include <linux/console.h>
38
#include <linux/module.h>
39
#include <drm/drm_crtc_helper.h>
J
Jesse Barnes 已提交
40

41 42
/* Forward declaration; the driver struct is defined near the end of this file. */
static struct drm_driver driver;

/* Default display register offsets (pipes, transcoders, DPLLs, palettes)
 * shared by every device info table below. */
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }


53
/* Static per-device capability tables for gen2 through gen6 hardware.
 * Each table is bound to PCI device IDs via INTEL_PCI_IDS below. */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

195 196 197
/* Common feature baseline for gen7 parts; individual tables below may
 * override fields (the last initializer for a field wins). */
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310
/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info), 	\
	INTEL_G45_IDS(&intel_g45_info), 	\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_D_IDS(&intel_broadwell_d_info)

/* PCI match table; terminated by the all-zero sentinel entry. */
static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

/* Only auto-load against these IDs when built for KMS. */
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

324
void intel_detect_pch(struct drm_device *dev)
325 326 327 328
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch;

B
Ben Widawsky 已提交
329 330 331 332 333 334 335 336
	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

337 338 339 340 341
	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
342 343 344 345 346
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan trhough
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
347 348
	 */
	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
349 350
	while (pch) {
		struct pci_dev *curr = pch;
351
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
352
			unsigned short id;
353
			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
354
			dev_priv->pch_id = id;
355

356 357 358
			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
359
				WARN_ON(!IS_GEN5(dev));
360
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
361 362
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
363
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
J
Jesse Barnes 已提交
364 365 366
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
367
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
368
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
369 370 371
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
372
				WARN_ON(!IS_HASWELL(dev));
373
				WARN_ON(IS_ULT(dev));
374 375 376 377 378 379
			} else if (IS_BROADWELL(dev)) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
380 381 382 383 384
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
385 386
			} else {
				goto check_next;
387
			}
388 389
			pci_dev_put(pch);
			break;
390
		}
391 392 393
check_next:
		pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
		pci_dev_put(curr);
394
	}
395 396
	if (!pch)
		DRM_DEBUG_KMS("No PCH found?\n");
397 398
}

399 400 401
/*
 * i915_semaphore_is_enabled - decide whether inter-ring semaphores are usable
 * @dev: drm device
 *
 * Returns false on pre-gen6 and (for now) gen8 hardware, honours the
 * i915.semaphores module parameter when it is explicitly set (>= 0), and
 * otherwise defaults to true except for SNB with IOMMU remapping active.
 */
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev)) {
		WARN_ON(!i915.preliminary_hw_support);
		return false;
	}

	/* Module parameter override: -1 means "auto", fall through below. */
	if (i915.semaphores >= 0)
		return i915.semaphores;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

422
/*
 * i915_drm_freeze - common suspend path shared by system sleep handlers
 * @dev: drm device
 *
 * Quiesces GEM, tears down interrupts, disables CRTCs (preserving sw state
 * for _thaw), saves register state and suspends the fbdev console.
 * Returns 0 on success or the error from i915_gem_suspend().
 * NOTE(review): statement order here is load-bearing (power well up before
 * register pokes, irq teardown before CRTC disable) — do not reorder.
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/* Hold a runtime-pm reference for the duration of the suspend. */
	intel_runtime_pm_get(dev_priv);

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	hsw_disable_package_c8(dev_priv);
	intel_display_set_init_power(dev, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		mutex_lock(&dev->mode_config.mutex);
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);
		mutex_unlock(&dev->mode_config.mutex);

		intel_modeset_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	intel_opregion_fini(dev);

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();

	return 0;
}

483
/*
 * i915_suspend - legacy DRM suspend entry point (non-dev_pm_ops path)
 * @dev: drm device
 * @state: pm transition being performed
 *
 * Validates the device, delegates to i915_drm_freeze(), and powers the PCI
 * device down to D3hot for a real suspend.  Returns 0 or a negative errno.
 */
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	/* Nothing to do right before a thaw. */
	if (state.event == PM_EVENT_PRETHAW)
		return 0;


	/* vga_switcheroo already powered us down; nothing to save. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

513 514 515 516 517 518 519 520
void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
521
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
522 523 524
	console_unlock();
}

525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542
static void intel_resume_hotplug(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

543
/*
 * __i915_drm_thaw - common resume path shared by thaw/resume handlers
 * @dev: drm device
 * @restore_gtt_mappings: re-instate GTT PTEs (needed when the BIOS may have
 *	clobbered them; see i915_resume())
 *
 * Mirror image of i915_drm_freeze(): sanitizes the hardware, restores saved
 * state, re-initializes GEM/modeset/interrupts and wakes the fbdev console.
 * Returns 0 or the error from i915_gem_init_hw().
 * NOTE(review): ordering is load-bearing — do not reorder steps.
 */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	intel_uncore_early_sanitize(dev);

	intel_uncore_sanitize(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	intel_power_domains_init_hw(dev);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		/* Defer the fbdev wakeup to intel_console_resume(). */
		schedule_work(&dev_priv->console_resume_work);
	}

	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
	 * expected level. */
	hsw_enable_package_c8(dev_priv);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_runtime_pm_put(dev_priv);
	return error;
}

621 622
/*
 * i915_drm_thaw - thaw handler; clears stale GTT faults then restores state.
 * Always asks __i915_drm_thaw() to restore GTT mappings.
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}

629
/*
 * i915_resume - legacy DRM resume entry point (non-dev_pm_ops path)
 * @dev: drm device
 *
 * Re-enables the PCI device, restores driver state via __i915_drm_thaw()
 * and re-arms output polling.  Returns 0 or a negative errno.
 */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* vga_switcheroo has us powered off; nothing to restore. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

655
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	/* Resets disabled via the i915.reset module parameter. */
	if (!i915.reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	/* Non-zero stop_rings means debugfs asked for a simulated hang. */
	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		ret = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		drm_irq_uninstall(dev);
		drm_irq_install(dev);
		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}

739
/*
 * i915_pci_probe - PCI probe callback
 * @pdev: matched PCI device
 * @ent: matching pciidlist entry; driver_data points at the device info table
 *
 * Rejects preliminary hardware (unless overridden) and non-zero PCI
 * functions, then hands off to the DRM core.  Returns 0 or -ENODEV.
 */
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}

/* PCI remove callback: drop the DRM device registered at probe time. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}

771
/*
 * i915_pm_suspend - dev_pm_ops .suspend callback
 * @dev: generic device, backed by our PCI device
 *
 * Freezes the driver then powers the PCI device down to D3hot.
 * Returns 0, -ENODEV if DRM never initialized, or the freeze error.
 */
static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int error;

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	/* vga_switcheroo already powered us down. */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(drm_dev);
	if (error)
		return error;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

795
/* dev_pm_ops .resume/.restore callback: delegate to i915_resume(). */
static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = pci_get_drvdata(to_pci_dev(dev));

	return i915_resume(drm_dev);
}

803
/*
 * i915_pm_freeze - dev_pm_ops .freeze callback (hibernation image creation)
 *
 * Unlike i915_pm_suspend() this does not touch PCI power state; the device
 * stays on while the hibernation image is written.
 */
static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

816
/* dev_pm_ops .thaw callback: undo i915_pm_freeze() via i915_drm_thaw(). */
static int i915_pm_thaw(struct device *dev)
{
	struct drm_device *drm_dev = pci_get_drvdata(to_pci_dev(dev));

	return i915_drm_thaw(drm_dev);
}

824
/* dev_pm_ops .poweroff callback: same teardown as freeze. */
static int i915_pm_poweroff(struct device *dev)
{
	struct drm_device *drm_dev = pci_get_drvdata(to_pci_dev(dev));

	return i915_drm_freeze(drm_dev);
}

832 833 834 835 836 837 838 839 840 841
/*
 * i915_runtime_suspend - runtime-PM suspend callback
 *
 * Drops userspace GTT mmaps, stops the hangcheck timer, marks the device
 * runtime-suspended and notifies ACPI firmware via the opregion.
 */
static int i915_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Suspending device\n");

	/* Force a fault (and thus a wakeup) on the next GTT access. */
	i915_gem_release_all_mmaps(dev_priv);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	return 0;
}

/*
 * i915_runtime_resume - runtime-PM resume callback
 *
 * Notifies firmware we are back at D0 and clears the suspended flag.
 */
static int i915_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	return 0;
}

875
/* Power-management callbacks, wired into i915_pci_driver below.
 * .restore reuses the resume path on return from hibernation. */
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore = i915_pm_resume,
	.runtime_suspend = i915_runtime_suspend,
	.runtime_resume = i915_runtime_resume,
};

886
/* VMA ops for GEM object mmaps: faults handled by the driver, open/close
 * by the DRM GEM helpers. */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

892 893 894 895 896 897 898 899 900 901 902 903 904 905
/* File operations for /dev/dri device nodes; mostly the stock DRM
 * entry points, plus a 32-bit compat ioctl shim. */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

L
Linus Torvalds 已提交
906
/* Main DRM driver descriptor.  Note that i915_pci_probe() strips
 * DRIVER_USE_AGP and i915_init() adjusts DRIVER_MODESET at load time. */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	/* PRIME buffer sharing (dma-buf import/export). */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	/* Dumb-buffer interface for unaccelerated KMS clients. */
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

953 954 955 956 957 958 959 960
/* PCI driver glue; registered from i915_init() via drm_pci_init(). */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

L
Linus Torvalds 已提交
961 962 963
/*
 * i915_init - module init: decide KMS vs UMS, then register the PCI driver.
 * Returns 0 (possibly without registering, see below) or the
 * drm_pci_init() error.
 */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* UMS has no use for KMS-style vblank timestamps. */
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}

/*
 * i915_exit - module exit; must mirror the early-out in i915_init(),
 * which returns success without registering when UMS is disabled.
 */
static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

/* Module metadata; DRIVER_* strings are also used in the driver struct above. */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");