/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;

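/*
 * Initializer fragments for the per-pipe register block offsets (pipes,
 * transcoders, palettes, cursors) carried in struct intel_device_info.
 * Cherryview gets its own pipe/transcoder/palette variants because its
 * third pipe and transcoder live at different offsets.
 */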
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_cherryview_info = {
	.is_preliminary = 1,
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info),	\
	INTEL_G45_IDS(&intel_g45_info),		\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
	INTEL_CHV_IDS(&intel_cherryview_info)

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

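/*
 * Identify the PCH (south display) variant, if any, and cache its type and
 * device id in dev_priv for later feature checks.
 */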
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, which only
	 * needs to expose the ISA bridge to let the driver know the real
	 * hardware underneath. This is a requirement from the virtualization
	 * team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

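/*
 * Common freeze path for system suspend/hibernate: idle the GPU, disable
 * the CRTCs while preserving software state for thaw, save register state
 * and tell the opregion code which power state we are headed for.
 */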
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	pci_power_t opregion_target_state;

	intel_runtime_pm_get(dev_priv);

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;

		intel_suspend_gt_powersave(dev);

		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		drm_modeset_lock_all(dev);
		for_each_crtc(dev, crtc) {
			dev_priv->display.crtc_disable(crtc);
		}
		drm_modeset_unlock_all(dev);

		intel_modeset_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	if (acpi_target_system_state() >= ACPI_STATE_S3)
		opregion_target_state = PCI_D3cold;
	else
		opregion_target_state = PCI_D1;
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	return 0;
}

int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

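/*
 * Deferred fbdev resume: scheduled from __i915_drm_thaw() when the console
 * lock is contended, so the framebuffer console is switched back on outside
 * the hot resume path.
 */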
void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
}

static int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_disable_pc8(dev_priv);

	intel_uncore_early_sanitize(dev, true);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return 0;
}

static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev, dev->pdev->irq);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

static int i915_drm_thaw(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}

static int i915_resume_early(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	return i915_drm_thaw_early(dev);
}

int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

static int i915_resume_legacy(struct drm_device *dev)
{
	i915_resume_early(dev);
	i915_resume(dev);

	return 0;
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	if (!i915.reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		ret = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		/*
		 * FIXME: This races pretty badly against concurrent holders of
		 * ring interrupts. This is possible since we've started to drop
		 * dev->struct_mutex in select places when waiting for the gpu.
		 */

		/*
		 * rps/rc6 re-init is necessary to restore state lost after the
		 * reset and the re-install of gt irqs. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset.
		 */
		if (INTEL_INFO(dev)->gen > 5)
			intel_reset_gt_powersave(dev);

		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = drm_dev->dev_private;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
		hsw_enable_pc8(dev_priv);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int i915_pm_resume_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw_early(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}

static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int snb_runtime_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_init_pch_refclk(dev);

	return 0;
}

static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
	hsw_disable_pc8(dev_priv);

	return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit; they are not
	 * restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}

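/*
 * Force the Gunit graphics clock on or off through the survivability
 * register, waiting (with a bounded timeout) for the clock status bit to
 * settle in the requested state.
 */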
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
	/* Wait for a previous force-off to settle */
	if (force_on) {
		err = wait_for(!COND, 20);
		if (err) {
			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
			return err;
		}
	}

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
			wait_for_on ? "on" : "off",
			I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_ERROR("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;
	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	intel_init_clock_gating(dev);
	i915_gem_restore_fences(dev);

	return ret;
}

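/*
 * Runtime PM entry points.  The GPU is expected to be idle by the time we
 * get here; platform-specific handling is dispatched to the hsw/snb/vlv
 * helpers above (GEN6 needs no extra work on suspend).
 */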
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	WARN_ON(!HAS_RUNTIME_PM(dev));
	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps.work can't be rearmed here, since we get here only after making
	 * sure the GPU is idle and the RPS freq is set to the minimum. See
	 * intel_mark_idle().
	 */
	cancel_work_sync(&dev_priv->rps.work);
	intel_runtime_pm_disable_interrupts(dev);

	if (IS_GEN6(dev)) {
		ret = 0;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_suspend(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_suspend(dev_priv);
	} else {
		ret = -ENODEV;
		WARN_ON(1);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_restore_interrupts(dev);

		return ret;
	}

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	if (IS_GEN6(dev)) {
		ret = snb_runtime_resume(dev_priv);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_resume(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_resume(dev_priv);
	} else {
		WARN_ON(1);
		ret = -ENODEV;
	}

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

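/*
 * System-wide (suspend/hibernate) and runtime PM callbacks, wiring the PM
 * core to the freeze/thaw and runtime suspend/resume helpers above.
 */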
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}

static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");