/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;

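/*
 * Per-platform feature descriptions. Each intel_device_info below names
 * the hardware generation, pipe count and feature flags the driver may
 * rely on; the PCI ID tables further down bind device IDs to one of
 * these structs. The offset macros first factor out the register
 * layouts shared by most platforms.
 */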
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_cherryview_info = {
	.is_preliminary = 1,
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

/*
 * Make sure any device matches here are ordered from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info),	\
	INTEL_G45_IDS(&intel_g45_info),		\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
	INTEL_CHV_IDS(&intel_cherryview_info)

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

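/* The PCI ID table is only exported for module autoloading when the
 * driver is built with KMS support, so plain UMS builds are not
 * autoloaded on PCI ID matches.
 */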
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

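/*
 * Identify which PCH (Platform Controller Hub) is paired with the GPU by
 * walking the ISA bridge devices; the detected PCH type decides which
 * south-display code paths the driver uses.
 */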
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easily for VMMs, which then
	 * only need to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there may be an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

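/* Clear the hotplug state under the IRQ lock first, so no new hotplug
 * work can be queued while the pending work items below are cancelled.
 */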
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->long_hpd_port_mask = 0;
	dev_priv->short_hpd_port_mask = 0;
	dev_priv->hpd_event_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->dig_port_work);
	cancel_work_sync(&dev_priv->hotplug_work);
	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_encoder *encoder;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (intel_encoder->suspend)
			intel_encoder->suspend(intel_encoder);
	}
	drm_modeset_unlock_all(dev);
}

static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int intel_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume);

static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	pci_power_t opregion_target_state;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw. Also, power gate the CRTC power wells.
		 */
		drm_modeset_lock_all(dev);
		for_each_crtc(dev, crtc)
			intel_crtc_control(crtc, false);
		drm_modeset_unlock_all(dev);

		intel_dp_mst_suspend(dev);

		flush_delayed_work(&dev_priv->rps.delayed_resume_work);

		intel_runtime_pm_disable_interrupts(dev);
		intel_hpd_cancel_work(dev_priv);

		intel_suspend_encoders(dev_priv);

		intel_suspend_gt_powersave(dev);

		intel_modeset_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		opregion_target_state = PCI_D1;
#endif
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	return 0;
}

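/* Legacy (non-PM-ops) suspend entry point; the real work happens in
 * i915_drm_freeze() above.
 */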
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

static int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret);

	intel_uncore_early_sanitize(dev, true);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return ret;
}

static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		intel_runtime_pm_restore_interrupts(dev);

		intel_modeset_init_hw(dev);

		{
			unsigned long irqflags;
			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
			if (dev_priv->display.hpd_irq_setup)
				dev_priv->display.hpd_irq_setup(dev);
			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		}

		intel_dp_mst_resume(dev);
		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		/* Config may have changed between suspend and resume */
		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	return 0;
}

static int i915_drm_thaw(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}

static int i915_resume_early(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	return i915_drm_thaw_early(dev);
}

int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

static int i915_resume_legacy(struct drm_device *dev)
{
	i915_resume_early(dev);
	i915_resume(dev);

	return 0;
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	if (!i915.reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
		dev_priv->gpu_error.reload_in_reset = true;

		ret = i915_gem_init_hw(dev);

		dev_priv->gpu_error.reload_in_reset = false;

		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		/*
		 * FIXME: This races pretty badly against concurrent holders of
		 * ring interrupts. This is possible since we've started to drop
		 * dev->struct_mutex in select places when waiting for the gpu.
		 */

		/*
		 * rps/rc6 re-init is necessary to restore state lost after the
		 * reset and the re-install of gt irqs. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset.
		 */
		if (INTEL_INFO(dev)->gen > 5)
			intel_reset_gt_powersave(dev);

		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int ret;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = intel_suspend_complete(dev_priv);

	if (ret)
		DRM_ERROR("Suspend complete failed: %d\n", ret);
	else {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}

static int i915_pm_resume_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw_early(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}

static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int snb_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;

	if (rpm_resume)
		intel_init_pch_refclk(dev);

	return 0;
}

static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	hsw_disable_pc8(dev_priv);

	return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit; they are not
	 * restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}

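/* Force the GFX clock on or off around the Gunit save/restore above; the
 * COND macro below polls the survivability register until the requested
 * clock state is reported.
 */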
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
	/* Wait for a previous force-off to settle */
	if (force_on) {
		err = wait_for(!COND, 20);
		if (err) {
			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
			return err;
		}
	}

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
			wait_for_on ? "on" : "off",
			I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_ERROR("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

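/* Enter s0ix: check that the GT power wells are already down, force the
 * GFX clock so the Gunit state can be saved safely, disable GT wake and
 * finally release the forced clock again.
 */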
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;
	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps.work can't be rearmed here, since we get here only after making
	 * sure the GPU is idle and the RPS freq is set to the minimum. See
	 * intel_mark_idle().
	 */
	cancel_work_sync(&dev_priv->rps.work);
	intel_runtime_pm_disable_interrupts(dev);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_restore_interrupts(dev);

		return ret;
	}

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_HASWELL(dev)) {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	} else {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it. Let's
		 * assume the other non-Haswell platforms will stay the same as
		 * Broadwell.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	}

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	ret = intel_resume_prepare(dev_priv, true);
	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements common functionality of runtime and system
 * suspend sequence.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}

/*
 * This function implements common functionality of runtime and system
 * resume sequence. The rpm_resume variable is used to select the
 * different code paths.
 */
static int intel_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	if (IS_GEN6(dev))
		ret = snb_resume_prepare(dev_priv, rpm_resume);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		ret = hsw_resume_prepare(dev_priv, rpm_resume);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_resume_prepare(dev_priv, rpm_resume);
	else
		ret = 0;

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

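/* Module init: pick KMS vs. UMS from the build configuration and the
 * i915.modeset parameter before registering the PCI driver.
 */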
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}

static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR("Tungsten Graphics, Inc.");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");