/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include "drm_crtc_helper.h"

41
static int i915_modeset __read_mostly = -1;
J
Jesse Barnes 已提交
42
module_param_named(modeset, i915_modeset, int, 0400);
43 44 45
MODULE_PARM_DESC(modeset,
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
		"1=on, -1=force vga console preference [default])");
J
Jesse Barnes 已提交
46

47
unsigned int i915_fbpercrtc __always_unused = 0;
J
Jesse Barnes 已提交
48
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
L
Linus Torvalds 已提交
49

50
int i915_panel_ignore_lid __read_mostly = 0;
51
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
52 53 54
MODULE_PARM_DESC(panel_ignore_lid,
		"Override lid status (0=autodetect [default], 1=lid open, "
		"-1=lid closed)");
55

56
unsigned int i915_powersave __read_mostly = 1;
57
module_param_named(powersave, i915_powersave, int, 0600);
58 59
MODULE_PARM_DESC(powersave,
		"Enable powersavings, fbc, downclocking, etc. (default: true)");
60

61
int i915_semaphores __read_mostly = -1;
62
module_param_named(semaphores, i915_semaphores, int, 0600);
63
MODULE_PARM_DESC(semaphores,
64
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
65

66
int i915_enable_rc6 __read_mostly = -1;
67
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
68
MODULE_PARM_DESC(i915_enable_rc6,
69 70 71 72 73
		"Enable power-saving render C-state 6. "
		"Different stages can be selected via bitmask values "
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
		"default: -1 (use per-chip default)");
C
Chris Wilson 已提交
74

75
int i915_enable_fbc __read_mostly = -1;
76
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
77 78
MODULE_PARM_DESC(i915_enable_fbc,
		"Enable frame buffer compression for power savings "
79
		"(default: -1 (use per-chip default))");
80

81
unsigned int i915_lvds_downclock __read_mostly = 0;
82
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
83 84 85
MODULE_PARM_DESC(lvds_downclock,
		"Use panel (LVDS/eDP) downclocking for power savings "
		"(default: false)");
86

87 88 89 90 91 92
int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
		 "Specify LVDS channel mode "
		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");

93
int i915_panel_use_ssc __read_mostly = -1;
94
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
95 96
MODULE_PARM_DESC(lvds_use_ssc,
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
97
		"(default: auto from VBT)");
98

99
int i915_vbt_sdvo_panel_type __read_mostly = -1;
100
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
101
MODULE_PARM_DESC(vbt_sdvo_panel_type,
102 103
		"Override/Ignore selection of SDVO panel mode in the VBT "
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
104

105
static bool i915_try_reset __read_mostly = true;
C
Chris Wilson 已提交
106
module_param_named(reset, i915_try_reset, bool, 0600);
107
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
C
Chris Wilson 已提交
108

109
bool i915_enable_hangcheck __read_mostly = true;
110
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
111 112 113 114
MODULE_PARM_DESC(enable_hangcheck,
		"Periodically check GPU activity for detecting hangs. "
		"WARNING: Disabling this can cause system wide hangs. "
		"(default: true)");
115

116 117
int i915_enable_ppgtt __read_mostly = -1;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
D
Daniel Vetter 已提交
118 119 120
MODULE_PARM_DESC(i915_enable_ppgtt,
		"Enable PPGTT (default: true)");

121
static struct drm_driver driver;
122
extern int intel_agp_enabled;
123

124
#define INTEL_VGA_DEVICE(id, info) {		\
125
	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
126
	.class_mask = 0xff0000,			\
127 128 129 130
	.vendor = 0x8086,			\
	.device = id,				\
	.subvendor = PCI_ANY_ID,		\
	.subdevice = PCI_ANY_ID,		\
131 132
	.driver_data = (unsigned long) info }

133
static const struct intel_device_info intel_i830_info = {
134
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
135
	.has_overlay = 1, .overlay_needs_physical = 1,
136 137
};

138
static const struct intel_device_info intel_845g_info = {
139
	.gen = 2,
140
	.has_overlay = 1, .overlay_needs_physical = 1,
141 142
};

143
static const struct intel_device_info intel_i85x_info = {
144
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
145
	.cursor_needs_physical = 1,
146
	.has_overlay = 1, .overlay_needs_physical = 1,
147 148
};

149
static const struct intel_device_info intel_i865g_info = {
150
	.gen = 2,
151
	.has_overlay = 1, .overlay_needs_physical = 1,
152 153
};

154
static const struct intel_device_info intel_i915g_info = {
155
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
156
	.has_overlay = 1, .overlay_needs_physical = 1,
157
};
158
static const struct intel_device_info intel_i915gm_info = {
159
	.gen = 3, .is_mobile = 1,
160
	.cursor_needs_physical = 1,
161
	.has_overlay = 1, .overlay_needs_physical = 1,
162
	.supports_tv = 1,
163
};
164
static const struct intel_device_info intel_i945g_info = {
165
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
166
	.has_overlay = 1, .overlay_needs_physical = 1,
167
};
168
static const struct intel_device_info intel_i945gm_info = {
169
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
170
	.has_hotplug = 1, .cursor_needs_physical = 1,
171
	.has_overlay = 1, .overlay_needs_physical = 1,
172
	.supports_tv = 1,
173 174
};

175
static const struct intel_device_info intel_i965g_info = {
176
	.gen = 4, .is_broadwater = 1,
177
	.has_hotplug = 1,
178
	.has_overlay = 1,
179 180
};

181
static const struct intel_device_info intel_i965gm_info = {
182
	.gen = 4, .is_crestline = 1,
183
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
184
	.has_overlay = 1,
185
	.supports_tv = 1,
186 187
};

188
static const struct intel_device_info intel_g33_info = {
189
	.gen = 3, .is_g33 = 1,
190
	.need_gfx_hws = 1, .has_hotplug = 1,
191
	.has_overlay = 1,
192 193
};

194
static const struct intel_device_info intel_g45_info = {
195
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
196
	.has_pipe_cxsr = 1, .has_hotplug = 1,
197
	.has_bsd_ring = 1,
198 199
};

200
static const struct intel_device_info intel_gm45_info = {
201
	.gen = 4, .is_g4x = 1,
202
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
203
	.has_pipe_cxsr = 1, .has_hotplug = 1,
204
	.supports_tv = 1,
205
	.has_bsd_ring = 1,
206 207
};

208
static const struct intel_device_info intel_pineview_info = {
209
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
210
	.need_gfx_hws = 1, .has_hotplug = 1,
211
	.has_overlay = 1,
212 213
};

214
static const struct intel_device_info intel_ironlake_d_info = {
215
	.gen = 5,
216
	.need_gfx_hws = 1, .has_hotplug = 1,
217
	.has_bsd_ring = 1,
218
	.has_pch_split = 1,
219 220
};

221
static const struct intel_device_info intel_ironlake_m_info = {
222
	.gen = 5, .is_mobile = 1,
223
	.need_gfx_hws = 1, .has_hotplug = 1,
224
	.has_fbc = 1,
225
	.has_bsd_ring = 1,
226
	.has_pch_split = 1,
227 228
};

229
static const struct intel_device_info intel_sandybridge_d_info = {
230
	.gen = 6,
231
	.need_gfx_hws = 1, .has_hotplug = 1,
232
	.has_bsd_ring = 1,
233
	.has_blt_ring = 1,
234
	.has_llc = 1,
235
	.has_pch_split = 1,
236
	.has_force_wake = 1,
237 238
};

239
static const struct intel_device_info intel_sandybridge_m_info = {
240
	.gen = 6, .is_mobile = 1,
241
	.need_gfx_hws = 1, .has_hotplug = 1,
242
	.has_fbc = 1,
243
	.has_bsd_ring = 1,
244
	.has_blt_ring = 1,
245
	.has_llc = 1,
246
	.has_pch_split = 1,
247
	.has_force_wake = 1,
248 249
};

250 251 252 253 254
static const struct intel_device_info intel_ivybridge_d_info = {
	.is_ivybridge = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
255
	.has_llc = 1,
256
	.has_pch_split = 1,
257
	.has_force_wake = 1,
258 259 260 261 262 263 264 265
};

static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
266
	.has_llc = 1,
267
	.has_pch_split = 1,
268
	.has_force_wake = 1,
269 270
};

271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288
static const struct intel_device_info intel_valleyview_m_info = {
	.gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	.gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};

289 290 291 292 293 294 295
static const struct intel_device_info intel_haswell_d_info = {
	.is_haswell = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
296
	.has_force_wake = 1,
297 298 299 300 301 302 303 304 305
};

static const struct intel_device_info intel_haswell_m_info = {
	.is_haswell = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
306
	.has_force_wake = 1,
307 308
};

309 310 311 312
static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
313
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
336
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
337 338 339 340
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
341
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
342 343
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
344
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
345
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
346
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
347
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
348 349 350 351 352
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
353
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
354 355 356 357 358 359 360
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
361
	{0, 0, 0}
L
Linus Torvalds 已提交
362 363
};

J
Jesse Barnes 已提交
364 365 366 367
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

/* PCH (platform controller hub) PCI device IDs, matched by intel_detect_pch(). */
#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00

374
void intel_detect_pch(struct drm_device *dev)
375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch;

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 */
	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (pch) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			int id;
			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

391 392
			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
393
				dev_priv->num_pch_pll = 2;
394 395
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
396
				dev_priv->pch_type = PCH_CPT;
397
				dev_priv->num_pch_pll = 2;
398
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
J
Jesse Barnes 已提交
399 400 401
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
402
				dev_priv->num_pch_pll = 2;
J
Jesse Barnes 已提交
403
				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
404 405
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
406
				dev_priv->num_pch_pll = 0;
407
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
408
			}
409
			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
410 411 412 413 414
		}
		pci_dev_put(pch);
	}
}

415 416 417 418 419 420 421 422
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	if (i915_semaphores >= 0)
		return i915_semaphores;

423
#ifdef CONFIG_INTEL_IOMMU
424
	/* Enable semaphores on SNB when IO remapping is off */
425 426 427
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif
428 429 430 431

	return 1;
}

432
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
433 434 435 436 437 438 439 440 441 442 443 444 445 446 447
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		udelay(10);

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
		udelay(10);
}

448 449 450 451 452 453 454 455
void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
		udelay(10);

456
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
457 458 459 460 461 462 463
	POSTING_READ(FORCEWAKE_MT);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
		udelay(10);
}

464 465 466 467 468 469 470 471
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
472
	unsigned long irqflags;
473

474 475
	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (dev_priv->forcewake_count++ == 0)
476
		dev_priv->display.force_wake_get(dev_priv);
477
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
478 479
}

480 481 482 483 484 485 486 487 488
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;
	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
	     "MMIO read or write has been dropped %x\n", gtfifodbg))
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

489
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
490 491
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
492 493
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
494 495
}

496 497
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
498
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
499 500
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
501 502
}

503 504 505 506 507
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
508
	unsigned long irqflags;
509

510 511
	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (--dev_priv->forcewake_count == 0)
512
		dev_priv->display.force_wake_put(dev_priv);
513
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
514 515
}

516
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
517
{
518 519
	int ret = 0;

520
	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
521 522 523 524 525 526
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
527 528
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
529
		dev_priv->gt_fifo_count = fifo;
530
	}
531
	dev_priv->gt_fifo_count--;
532 533

	return ret;
534 535
}

536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560
void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;

	/* Already awake? */
	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
		return;

	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
	POSTING_READ(FORCEWAKE_VLV);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
		udelay(10);
}

void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
	/* FIXME: confirm VLV behavior with Punit folks */
	POSTING_READ(FORCEWAKE_VLV);
}

561
static int i915_drm_freeze(struct drm_device *dev)
J
Jesse Barnes 已提交
562
{
563 564
	struct drm_i915_private *dev_priv = dev->dev_private;

565 566
	drm_kms_helper_poll_disable(dev);

J
Jesse Barnes 已提交
567 568
	pci_save_state(dev->pdev);

569
	/* If KMS is active, we do the leavevt stuff here */
570
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
571 572
		int error = i915_gem_idle(dev);
		if (error) {
573
			dev_err(&dev->pdev->dev,
574 575 576
				"GEM idle failed, resume might fail\n");
			return error;
		}
577
		drm_irq_uninstall(dev);
578 579
	}

580 581
	i915_save_state(dev);

582
	intel_opregion_fini(dev);
583

584 585
	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;
586

587 588 589 590
	console_lock();
	intel_fbdev_set_suspend(dev, 1);
	console_unlock();

591
	return 0;
592 593
}

594
int i915_suspend(struct drm_device *dev, pm_message_t state)
595 596 597 598 599 600 601 602 603 604 605 606
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;

607 608 609

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
610

611 612 613 614
	error = i915_drm_freeze(dev);
	if (error)
		return error;

615 616 617 618 619
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
J
Jesse Barnes 已提交
620 621 622 623

	return 0;
}

624
static int i915_drm_thaw(struct drm_device *dev)
J
Jesse Barnes 已提交
625
{
626
	struct drm_i915_private *dev_priv = dev->dev_private;
627
	int error = 0;
628

629 630 631 632 633 634
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

635
	i915_restore_state(dev);
636
	intel_opregion_setup(dev);
637

638 639
	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
640 641 642
		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

643 644 645
		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

646
		error = i915_gem_init_hw(dev);
647
		mutex_unlock(&dev->struct_mutex);
648

649
		intel_modeset_init_hw(dev);
650
		drm_mode_config_reset(dev);
651
		drm_irq_install(dev);
652

653
		/* Resume the modeset for every activated CRTC */
654
		mutex_lock(&dev->mode_config.mutex);
655
		drm_helper_resume_force_mode(dev);
656
		mutex_unlock(&dev->mode_config.mutex);
J
Jesse Barnes 已提交
657
	}
658

659 660
	intel_opregion_init(dev);

661
	dev_priv->modeset_on_lid = 0;
662

663 664 665
	console_lock();
	intel_fbdev_set_suspend(dev, 0);
	console_unlock();
666 667 668
	return error;
}

669
int i915_resume(struct drm_device *dev)
670
{
671 672
	int ret;

673 674 675
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

676 677 678 679 680
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

681 682 683 684 685 686
	ret = i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
J
Jesse Barnes 已提交
687 688
}

689
static int i8xx_do_reset(struct drm_device *dev)
690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}

719 720 721
static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
722
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
D
Daniel Vetter 已提交
723
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
724 725
}

726
static int i965_do_reset(struct drm_device *dev)
727
{
728
	int ret;
729 730
	u8 gdrst;

731 732 733 734 735
	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
736
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
737
	pci_write_config_byte(dev->pdev, I965_GDRST,
738 739 740 741 742 743 744 745 746 747 748
			      gdrst | GRDOM_RENDER |
			      GRDOM_RESET_ENABLE);
	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      gdrst | GRDOM_MEDIA |
			      GRDOM_RESET_ENABLE);
749 750 751 752

	return wait_for(i965_reset_complete(dev), 500);
}

753
static int ironlake_do_reset(struct drm_device *dev)
754 755
{
	struct drm_i915_private *dev_priv = dev->dev_private;
756 757 758 759 760 761 762 763 764 765 766 767
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
768
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
769
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
770
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
J
Jesse Barnes 已提交
771 772
}

773
static int gen6_do_reset(struct drm_device *dev)
774 775
{
	struct drm_i915_private *dev_priv = dev->dev_private;
776 777
	int	ret;
	unsigned long irqflags;
778

779 780 781
	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
782
	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
783 784 785 786 787 788 789 790 791 792 793 794 795

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
796 797
	if (dev_priv->forcewake_count)
		dev_priv->display.force_wake_get(dev_priv);
798 799 800 801 802 803
	else
		dev_priv->display.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

804 805
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
	return ret;
806 807
}

808
static int intel_gpu_reset(struct drm_device *dev)
809
{
810
	struct drm_i915_private *dev_priv = dev->dev_private;
811 812 813 814 815
	int ret = -ENODEV;

	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
816
		ret = gen6_do_reset(dev);
817 818
		break;
	case 5:
819
		ret = ironlake_do_reset(dev);
820 821
		break;
	case 4:
822
		ret = i965_do_reset(dev);
823 824
		break;
	case 2:
825
		ret = i8xx_do_reset(dev);
826 827 828
		break;
	}

829 830 831 832 833 834 835 836 837 838 839
	/* Also reset the gpu hangman. */
	if (dev_priv->stop_rings) {
		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_ERROR("Reset not implemented, but ignoring "
				  "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

840 841 842
	return ret;
}

843
/**
844
 * i915_reset - reset chip after a hang
845 846 847 848 849 850 851 852 853 854 855 856 857
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
858
int i915_reset(struct drm_device *dev)
859 860
{
	drm_i915_private_t *dev_priv = dev->dev_private;
861
	int ret;
862

C
Chris Wilson 已提交
863 864 865
	if (!i915_try_reset)
		return 0;

866 867
	if (!mutex_trylock(&dev->struct_mutex))
		return -EBUSY;
868

869 870
	dev_priv->stop_rings = 0;

871
	i915_gem_reset(dev);
872

873
	ret = -ENODEV;
874
	if (get_seconds() - dev_priv->last_gpu_reset < 5)
875
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
876
	else
877
		ret = intel_gpu_reset(dev);
878

879
	dev_priv->last_gpu_reset = get_seconds();
880
	if (ret) {
881
		DRM_ERROR("Failed to reset chip.\n");
882
		mutex_unlock(&dev->struct_mutex);
883
		return ret;
884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
901
			!dev_priv->mm.suspended) {
902 903 904
		struct intel_ring_buffer *ring;
		int i;

905
		dev_priv->mm.suspended = 0;
906

907 908
		i915_gem_init_swizzling(dev);

909 910
		for_each_ring(ring, dev_priv, i)
			ring->init(ring);
911

D
Daniel Vetter 已提交
912 913
		i915_gem_init_ppgtt(dev);

914
		mutex_unlock(&dev->struct_mutex);
915 916 917 918

		if (drm_core_check_feature(dev, DRIVER_MODESET))
			intel_modeset_init_hw(dev);

919 920
		drm_irq_uninstall(dev);
		drm_irq_install(dev);
921 922
	} else {
		mutex_unlock(&dev->struct_mutex);
923 924 925 926 927 928
	}

	return 0;
}


929 930 931
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
932 933 934 935 936 937 938 939
	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

940
	return drm_get_pci_dev(pdev, ent, &driver);
941 942 943 944 945 946 947 948 949 950
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

951
static int i915_pm_suspend(struct device *dev)
952
{
953 954 955
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int error;
956

957 958 959 960
	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}
961

962 963 964
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

965 966 967
	error = i915_drm_freeze(drm_dev);
	if (error)
		return error;
968

969 970
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
971

972
	return 0;
973 974
}

975
/* PM core ->resume/->restore callback: thin wrapper around i915_resume(). */
static int i915_pm_resume(struct device *dev)
{
	return i915_resume(pci_get_drvdata(to_pci_dev(dev)));
}

983
static int i915_pm_freeze(struct device *dev)
984
{
985 986 987 988 989 990 991 992 993
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
994 995
}

996
/* PM core ->thaw callback: undo i915_pm_freeze() after image creation. */
static int i915_pm_thaw(struct device *dev)
{
	return i915_drm_thaw(pci_get_drvdata(to_pci_dev(dev)));
}

1004
/*
 * PM core ->poweroff callback (hibernation power-down): freeze the GPU.
 * No NULL check here — by this point ->freeze has already validated the
 * device earlier in the hibernation sequence.
 */
static int i915_pm_poweroff(struct device *dev)
{
	return i915_drm_freeze(pci_get_drvdata(to_pci_dev(dev)));
}

1012
static const struct dev_pm_ops i915_pm_ops = {
1013 1014 1015 1016 1017 1018
	.suspend = i915_pm_suspend,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore = i915_pm_resume,
1019 1020
};

1021
static const struct vm_operations_struct i915_gem_vm_ops = {
1022
	.fault = i915_gem_fault,
1023 1024
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
1025 1026
};

1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041
/* File operations for /dev/dri/cardN: everything is delegated to the
 * DRM core helpers except 32-bit compat ioctl translation, which needs
 * driver-specific handling. */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.fasync = drm_fasync,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

L
Linus Torvalds 已提交
1042
static struct drm_driver driver = {
1043 1044
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
D
Dave Airlie 已提交
1045
	 */
1046 1047
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
1048
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
1049
	.load = i915_driver_load,
J
Jesse Barnes 已提交
1050
	.unload = i915_driver_unload,
1051
	.open = i915_driver_open,
1052 1053
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
1054
	.postclose = i915_driver_postclose,
1055 1056 1057 1058 1059

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume,

1060
	.device_is_agp = i915_driver_device_is_agp,
L
Linus Torvalds 已提交
1061
	.reclaim_buffers = drm_core_reclaim_buffers,
1062 1063
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
1064
#if defined(CONFIG_DEBUG_FS)
1065 1066
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
1067
#endif
1068 1069
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
1070
	.gem_vm_ops = &i915_gem_vm_ops,
1071 1072 1073 1074 1075 1076

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

1077 1078 1079
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = i915_gem_dumb_destroy,
L
Linus Torvalds 已提交
1080
	.ioctls = i915_ioctls,
1081
	.fops = &i915_driver_fops,
1082 1083 1084 1085 1086 1087
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
L
Linus Torvalds 已提交
1088 1089
};

1090 1091 1092 1093 1094 1095 1096 1097
/* PCI glue: matches the ids in pciidlist and routes probe/remove and
 * power management to the handlers defined above. */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

L
Linus Torvalds 已提交
1098 1099
/*
 * Module entry point: decide whether to enable kernel modesetting,
 * then register the PCI driver with the DRM core.
 */
static int __init i915_init(void)
{
	/* We rely on intel-agp for GTT/chipset setup; bail out without it. */
	if (!intel_agp_enabled) {
		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
		return -ENODEV;
	}

	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915_modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915_modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915_modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	/* Precise vblank timestamps only work with modesetting enabled. */
	if (!(driver.driver_features & DRIVER_MODESET))
		driver.get_vblank_timestamp = NULL;

	return drm_pci_init(&driver, &i915_pci_driver);
}

/* Module exit point: unregister from the DRM/PCI core. */
static void __exit i915_exit(void)
{
	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

D
Dave Airlie 已提交
1142 1143
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
L
Linus Torvalds 已提交
1144
MODULE_LICENSE("GPL and additional rights");
1145

1146 1147
/* We give fast paths for the really cool registers.
 * A register needs a forcewake dance when the hardware supports
 * forcewake, the offset lies in the GT range (< 0x40000), and it is
 * not the FORCEWAKE register itself (which must stay accessible). */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
1151

1152 1153 1154 1155
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1156 1157 1158 1159
		unsigned long irqflags; \
		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_get(dev_priv); \
1160
		val = read##y(dev_priv->regs + reg); \
1161 1162 1163
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_put(dev_priv); \
		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178
	} else { \
		val = read##y(dev_priv->regs + reg); \
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read

#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1179
	u32 __fifo_ret = 0; \
1180 1181
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1182
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1183 1184
	} \
	write##y(val, dev_priv->regs + reg); \
1185 1186 1187
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
1188 1189 1190 1191 1192 1193
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write