/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}
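
/*
 * Illustrative example (derived from the logic above): booting with
 * i915.inject_load_failure=2 makes the second init checkpoint that
 * evaluates i915_inject_load_failure() report a failure, so that init
 * phase bails out with -ENODEV and its error path gets exercised.
 */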

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *dev = dev_priv->dev->dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(dev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}
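
/*
 * Note: the struct va_format/%pV pair above forwards the caller's varargs
 * straight into dev_printk() without formatting into an intermediate
 * buffer.
 */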

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915.inject_load_failure &&
	       i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				     \
	__i915_printk(dev_priv,						     \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_engine_initialized(&dev_priv->engine[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_engine_initialized(&dev_priv->engine[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_engine_initialized(&dev_priv->engine[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_engine_initialized(&dev_priv->engine[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev_priv);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}
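
/*
 * Userspace reaches this through DRM_IOCTL_I915_GETPARAM; an illustrative
 * query (not taken from this file) looks like:
 *
 *	int id;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *				   .value = &id };
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */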

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
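
/*
 * Note: on gen4+ MCHBAR is a 64-bit address split across two config
 * dwords (hence the reg + 4 accesses above); older generations only
 * carry the low 32 bits.
 */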

/* Setup MCHBAR if possible; mchbar_need_disable records whether we
 * must disable it again on teardown. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
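
/*
 * The bitmask returned here tells the VGA arbiter which I/O and memory
 * resources this device needs decoded: legacy plus normal when decode is
 * enabled, normal only otherwise.
 */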

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static void i915_gem_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload. Afterwards we then clean up the
	 * GEM state tracking, flushing off the requests and leaving the
	 * system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	if (HAS_HW_CONTEXTS(dev)) {
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

	mutex_lock(&dev->struct_mutex);
	i915_gem_reset(dev);
	i915_gem_cleanup_engines(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);

	WARN_ON(!list_empty(&to_i915(dev)->context_list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_ucode_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config_async(dev);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	i915_gem_fini(dev);
cleanup_irq:
	intel_guc_ucode_fini(dev);
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif
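
/*
 * Summary of the three build variants above: without a VGA console there
 * is nothing to kick out; with a VGA console but no dummy console there is
 * nothing to hand the VTs over to, so we must fail; otherwise we switch
 * the VTs to dummy_con and unregister vga_con.
 */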

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}

static void cherryview_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	u32 fuse, eu_dis;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse = I915_READ(CHV_FUSE_GT);

	info->slice_total = 1;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	info->subslice_total = info->subslice_per_slice;
	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	info->eu_per_subslice = info->subslice_total ?
				info->eu_total / info->subslice_total :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	info->has_slice_pg = 0;
	info->has_subslice_pg = (info->subslice_total > 1);
	info->has_eu_pg = (info->eu_per_subslice > 2);
}
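
/*
 * Worked example (hypothetical fuse values): with SS1 fused off and two
 * EU-disable bits set for SS0, this yields subslice_total = 1 and
 * eu_total = 8 - 2 = 6, so eu_per_subslice = 6 and only EU power gating
 * is reported as supported.
 */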

static void gen9_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, s_enable, ss_disable, eu_disable;
	u8 eu_mask = 0xff;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
		   GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
		     GEN9_F2_SS_DIS_SHIFT;

	info->slice_total = hweight32(s_enable);
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total *
			       info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) have 7 EUs: we can
			 * tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total,
					     info->subslice_total) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
			       (info->slice_total > 1));
	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
	info->has_eu_pg = (info->eu_per_subslice > 2);
}
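
/*
 * Worked example (hypothetical fuse values): s_enable = 0b011 and
 * ss_disable = 0b0001 give slice_total = 2, subslice_per_slice = 4 - 1 = 3
 * and subslice_total = 2 * 3 = 6; each enabled subslice then contributes
 * eu_max minus its disabled-EU count to eu_total.
 */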

static void broadwell_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));


	info = (struct intel_device_info *)&dev_priv->info;
	info->slice_total = hweight32(s_enable);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total * info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	info->eu_per_subslice = info->subslice_total ?
		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	info->has_slice_pg = (info->slice_total > 1);
	info->has_subslice_pg = 0;
	info->has_eu_pg = 0;
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   HAS_PCH_SPLIT(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev))
		cherryview_sseu_info_init(dev);
	else if (IS_BROADWELL(dev))
		broadwell_sseu_info_init(dev);
	else if (INTEL_INFO(dev)->gen >= 9)
		gen9_sseu_info_init(dev);

	/* Snooping is broken on BXT A stepping. */
	info->has_snoop = !info->has_llc;
	info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");

	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities.  We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL)
		goto out_free_dp_wq;

	return 0;

out_free_dp_wq:
	destroy_workqueue(dev_priv->hotplug.dp_wq);
out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}
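
/*
 * Note: alloc_ordered_workqueue() returns a workqueue that executes at
 * most one work item at a time, in queueing order, which is the
 * single-instance behaviour the dev mutex comment above relies on.
 */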

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  struct drm_device *dev,
				  struct intel_device_info *info)
{
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_pm_setup(dev);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	i915_gem_load_init(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	return 0;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_load_cleanup(dev_priv->dev);
	i915_workqueues_cleanup(dev_priv);
}

static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}

static void i915_mmio_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_teardown_mchbar(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev))
		return -EIO;

	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;

	intel_uncore_init(dev_priv);

	return 0;

put_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t aperture_size;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev);

	ret = i915_ggtt_init_hw(dev);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = ggtt->mappable_end;

	ggtt->mappable =
		io_mapping_create_wc(ggtt->mappable_base,
				     aperture_size);
	if (!ggtt->mappable) {
		ret = -EIO;
		goto out_ggtt;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
				      aperture_size);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
		if (pci_enable_msi(dev->pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	arch_phys_wc_del(ggtt->mtrr);
	io_mapping_free(ggtt->mappable);
	i915_ggtt_cleanup_hw(dev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	i915_gem_shrinker_init(dev_priv);
	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	i915_audio_component_init(dev_priv);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_audio_component_cleanup(dev_priv);
	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_fini(dev_priv->dev);
	i915_teardown_sysfs(dev_priv->dev);
	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	int ret = 0;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	/* Must be set before calling __i915_printk */
	dev_priv->dev = dev;

	ret = i915_driver_init_early(dev_priv, dev,
				     (struct intel_device_info *)flags);

	if (ret < 0)
		goto out_free_priv;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(dev);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_vblank:
	drm_vblank_cleanup(dev);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_free_priv:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);

	kfree(dev_priv);

	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_fbdev_fini(dev);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	intel_guc_ucode_fini(dev);
	i915_gem_fini(dev);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_cleanup_early(dev_priv);
	kfree(dev_priv);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
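
/*
 * Flag summary for the table above: DRM_AUTH requires an authenticated
 * client on the primary node, DRM_RENDER_ALLOW additionally exposes the
 * ioctl on render nodes, and DRM_MASTER/DRM_ROOT_ONLY restrict it to the
 * current DRM master or CAP_SYS_ADMIN respectively.
 */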