/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

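/* Feature flags shared by the gen7 device info definitions below
 * (Ivybridge, Valleyview and Haswell all start from this baseline).
 */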
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
	.is_preliminary = 1,
	.is_skylake = 1,
	.gen = 9, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_gt3_info = {
	.is_preliminary = 1,
	.is_skylake = 1,
	.gen = 9, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broxton_info = {
	.is_preliminary = 1,
	.gen = 9,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info), 	\
	INTEL_G45_IDS(&intel_g45_info), 	\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
	INTEL_CHV_IDS(&intel_cherryview_info),	\
	INTEL_SKL_GT1_IDS(&intel_skylake_info),	\
	INTEL_SKL_GT2_IDS(&intel_skylake_info),	\
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),	\
	INTEL_BXT_IDS(&intel_broxton_info)

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, pciidlist);

void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_firmware_load_error_print(const char *fw_path, int err)
{
	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);

	/*
	 * If the reason is not known assume -ENOENT since that's the most
	 * usual failure mode.
	 */
	if (!err)
		err = -ENOENT;

	if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
		return;

	DRM_ERROR(
	  "The driver is built-in, so to load the firmware you need to\n"
	  "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
	  "in your initrd/initramfs image.\n");
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_encoder *encoder;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (intel_encoder->suspend)
			intel_encoder->suspend(intel_encoder);
	}
	drm_modeset_unlock_all(dev);
}

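/* Platform-specific suspend/resume helpers; definitions follow later in this file. */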
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);


static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		return error;
	}

	intel_suspend_gt_powersave(dev);

	/*
	 * Disable CRTCs directly since we want to preserve sw state
	 * for _thaw. Also, power gate the CRTC power wells.
	 */
	drm_modeset_lock_all(dev);
	intel_display_suspend(dev);
	drm_modeset_unlock_all(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		opregion_target_state = PCI_D1;
#endif
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	return 0;
}

static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int ret;

	ret = intel_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);

		return ret;
	}

	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some GEN4 platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. Platforms where this was seen:
	 * Lenovo Thinkpad X301, X61s
	 */
	if (!(hibernation &&
	      drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
	      INTEL_INFO(dev_priv)->gen == 4))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	return 0;
}

int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

729 730 731 732 733 734
	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);
735

736
	intel_modeset_init_hw(dev);
737

738 739 740 741
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
742

743
	drm_modeset_lock_all(dev);
744
	intel_display_resume(dev);
745
	drm_modeset_unlock_all(dev);
746

747
	intel_dp_mst_resume(dev);
748

749 750 751 752 753 754 755 756 757
	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 * */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);
758

759 760
	intel_opregion_init(dev);

761
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
762

763 764 765
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);
766

767 768
	intel_opregion_notify_adapter(dev, PCI_D0);

769 770
	drm_kms_helper_poll_enable(dev);

771
	return 0;
772 773
}

774
static int i915_drm_resume_early(struct drm_device *dev)
775
{
776
	struct drm_i915_private *dev_priv = dev->dev_private;
777
	int ret = 0;
778

779 780 781 782 783 784 785 786 787
	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		ret = skl_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return ret;
}

int i915_resume_legacy(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	intel_reset_gt_powersave(dev);

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	dev_priv->gpu_error.reload_in_reset = true;

	ret = i915_gem_init_hw(dev);

	dev_priv->gpu_error.reload_in_reset = false;

	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		return ret;
	}

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);

	return 0;
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
996 997
}

998 999
static int i915_pm_resume_early(struct device *dev)
{
I
1001

1002 1003 1004
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

1005
	return i915_drm_resume_early(drm_dev);
1006 1007
}

1008
static int i915_pm_resume(struct device *dev)
1009
{
I
1011

1012 1013 1014
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

1015
	return i915_drm_resume(drm_dev);
1016 1017
}

1018 1019 1020 1021 1022 1023 1024 1025 1026 1027
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
{
	/* Enabling DC6 is not a hard requirement to enter runtime D3 */

	/*
	 * This is to ensure that CSR isn't identified as loaded before
	 * CSR-loading program is called during runtime-resume.
	 */
	intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED);

1028 1029
	skl_uninit_cdclk(dev_priv);

1030 1031 1032
	return 0;
}

1033
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1034
{
P
1036 1037

	return 0;
1038 1039
}

1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071
static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when DC5 support is added disable DC5 here. */

	broxton_ddi_phy_uninit(dev);
	broxton_uninit_cdclk(dev);
	bxt_enable_dc9(dev_priv);

	return 0;
}

static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when CSR FW support is added make sure the FW is loaded */

	bxt_disable_dc9(dev_priv);

	/*
	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
	 * is available.
	 */
	broxton_init_cdclk(dev);
	broxton_ddi_phy_init(dev);
	intel_prepare_ddi(dev);

	return 0;
}

1072 1073 1074 1075
static int skl_resume_prepare(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

1076
	skl_init_cdclk(dev_priv);
1077 1078 1079 1080 1081
	intel_csr_load_program(dev);

	return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend on
 *                    keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
1177
	s->pcbr			= I915_READ(VLV_PCBR);
1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1206
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit; they are not
	 * restored here, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
1272
	I915_WRITE(VLV_PCBR,			s->pcbr);
1273 1274 1275
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}

1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
			wait_for_on ? "on" : "off",
			I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_ERROR("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

1477
	intel_suspend_gt_powersave(dev);
1478
	intel_runtime_pm_disable_interrupts(dev_priv);
1479

1480
	ret = intel_suspend_complete(dev_priv);
1481 1482
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1483
		intel_runtime_pm_enable_interrupts(dev_priv);
1484 1485 1486

		return ret;
	}
1487

1488
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1489
	intel_uncore_forcewake_reset(dev, false);
1490
	dev_priv->pm.suspended = true;
1491 1492

	/*
1493 1494
	 * FIXME: We really should find a document that references the arguments
	 * used below!
1495
	 */
1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515
	if (IS_HASWELL(dev)) {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	} else {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it. Let's
		 * assume the other non-Haswell platforms will stay the same as
		 * Broadwell.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	}

	assert_forcewakes_inactive(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_SKYLAKE(dev))
		ret = skl_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);
	intel_enable_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements common functionality of runtime and system
 * suspend sequence.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		ret = skl_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend_legacy,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * Enable KMS by default, unless explicitly overridden by
	 * either the i915.modeset parameter or by the
	 * vga_text_mode_force boot option.
	 */
	driver.driver_features |= DRIVER_MODESET;

	if (i915.modeset == 0)
		driver.driver_features &= ~DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.get_vblank_timestamp = NULL;
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}

	/*
	 * FIXME: Note that we're lying to the DRM core here so that we can get access
	 * to the atomic ioctl and the atomic properties.  Only plane operations on
	 * a single CRTC will actually work.
	 */
	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	return drm_pci_init(&driver, &i915_pci_driver);
}

static void __exit i915_exit(void)
{
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");