/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

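/*
 * Per-platform capability tables. Each PCI ID entry in pciidlist below
 * points at one of these via its driver_data field.
 */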
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_cherryview_info = {
	.is_preliminary = 1,
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
	.is_preliminary = 1,
	.is_skylake = 1,
	.gen = 9, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info), 	\
	INTEL_G45_IDS(&intel_g45_info), 	\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
	INTEL_CHV_IDS(&intel_cherryview_info),	\
	INTEL_SKL_IDS(&intel_skylake_info)

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
				WARN_ON(IS_ULT(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
				WARN_ON(!IS_ULT(dev));
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

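/*
 * Clear any latched hotplug state and cancel all outstanding hotplug work
 * so that none of it runs while the device is being suspended.
 */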
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->long_hpd_port_mask = 0;
	dev_priv->short_hpd_port_mask = 0;
	dev_priv->hpd_event_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->dig_port_work);
	cancel_work_sync(&dev_priv->hotplug_work);
	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
}

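/* Call each encoder's ->suspend hook, if any, under the modeset locks. */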
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_encoder *encoder;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (intel_encoder->suspend)
			intel_encoder->suspend(intel_encoder);
	}
	drm_modeset_unlock_all(dev);
}

static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int intel_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume);

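/*
 * Common freeze path for system suspend/hibernate: idle the GPU, shut down
 * the display, save register state and notify the opregion/fbdev layers.
 */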
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	pci_power_t opregion_target_state;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw. Also, power gate the CRTC power wells.
		 */
		drm_modeset_lock_all(dev);
		for_each_crtc(dev, crtc)
			intel_crtc_control(crtc, false);
		drm_modeset_unlock_all(dev);

		intel_dp_mst_suspend(dev);

		flush_delayed_work(&dev_priv->rps.delayed_resume_work);

		intel_runtime_pm_disable_interrupts(dev);
		intel_hpd_cancel_work(dev_priv);

		intel_suspend_encoders(dev_priv);

		intel_suspend_gt_powersave(dev);

		intel_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		opregion_target_state = PCI_D1;
#endif
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	return 0;
}

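/* Legacy suspend entry point; used in place of i915_pm_ops for non-KMS (UMS) setups. */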
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;


	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

static int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret);

	intel_uncore_early_sanitize(dev, true);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return ret;
}

static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		intel_runtime_pm_restore_interrupts(dev);

		intel_modeset_init_hw(dev);

		{
			spin_lock_irq(&dev_priv->irq_lock);
			if (dev_priv->display.hpd_irq_setup)
				dev_priv->display.hpd_irq_setup(dev);
			spin_unlock_irq(&dev_priv->irq_lock);
		}

		intel_dp_mst_resume(dev);
		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		/* Config may have changed between suspend and resume */
		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	return 0;
}

static int i915_drm_thaw(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}

static int i915_resume_early(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	return i915_drm_thaw_early(dev);
}

int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

static int i915_resume_legacy(struct drm_device *dev)
{
	i915_resume_early(dev);
	i915_resume(dev);

	return 0;
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	if (!i915.reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
		dev_priv->gpu_error.reload_in_reset = true;

		ret = i915_gem_init_hw(dev);

		dev_priv->gpu_error.reload_in_reset = false;

		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		/*
		 * FIXME: This races pretty badly against concurrent holders of
		 * ring interrupts. This is possible since we've started to drop
		 * dev->struct_mutex in select places when waiting for the gpu.
		 */

		/*
		 * rps/rc6 re-init is necessary to restore state lost after the
		 * reset and the re-install of gt irqs. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset.
		 */
		if (INTEL_INFO(dev)->gen > 5)
			intel_reset_gt_powersave(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}

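/*
 * PCI probe: reject preliminary hardware unless explicitly requested, bind
 * only to function 0 of the device and hand over to the DRM core.
 */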
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

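/* dev_pm_ops callbacks: thin wrappers around the freeze/thaw helpers above. */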
static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int ret;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = intel_suspend_complete(dev_priv);

	if (ret)
		DRM_ERROR("Suspend complete failed: %d\n", ret);
	else {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}

static int i915_pm_resume_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw_early(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}

static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int snb_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;

	if (rpm_resume)
		intel_init_pch_refclk(dev);

	return 0;
}

static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	hsw_disable_pc8(dev_priv);

	return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This documents marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend on
 *                    keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit, they are not
	 * to be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}

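/*
 * Force the VLV GFX clock on or off and wait for the request to take
 * effect (VLV_GFX_CLK_STATUS_BIT following VLV_GFX_CLK_FORCE_ON_BIT).
 */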
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
	/* Wait for a previous force-off to settle */
	if (force_on) {
		err = wait_for(!COND, 20);
		if (err) {
			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
			return err;
		}
	}

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}

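/*
 * Allow or disallow GT wake requests and wait for the Gunit to acknowledge
 * the new setting via VLV_GTLC_ALLOWWAKEACK.
 */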
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
			wait_for_on ? "on" : "off",
			I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_ERROR("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;
	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

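/*
 * Runtime PM suspend: release all userspace GTT mmaps (so any new access
 * faults in and takes an RPM reference first), quiesce interrupts and call
 * the platform-specific suspend_complete hook.
 */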
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps.work can't be rearmed here, since we get here only after making
	 * sure the GPU is idle and the RPS freq is set to the minimum. See
	 * intel_mark_idle().
	 */
	cancel_work_sync(&dev_priv->rps.work);
	intel_runtime_pm_disable_interrupts(dev);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_restore_interrupts(dev);

		return ret;
	}

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_HASWELL(dev)) {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	} else {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it. Let's
		 * assume the other non-Haswell platforms will stay the same as
		 * Broadwell.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	}

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

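/*
 * Runtime PM resume: the counterpart of intel_runtime_suspend. On failure
 * we carry on as best we can, but runtime PM gets disabled for the device.
 */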
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	ret = intel_resume_prepare(dev_priv, true);
	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements functionality common to the runtime and system
 * suspend sequences.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}

/*
 * This function implements functionality common to the runtime and system
 * resume sequences. The rpm_resume argument selects between the two code
 * paths.
 */
static int intel_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	if (IS_GEN6(dev))
		ret = snb_resume_prepare(dev_priv, rpm_resume);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		ret = hsw_resume_prepare(dev_priv, rpm_resume);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_resume_prepare(dev_priv, rpm_resume);
	else
		ret = 0;

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}

static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");