i915_dma.c 52.4 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
D
Dave Airlie 已提交
3
/*
L
Linus Torvalds 已提交
4 5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
D
Dave Airlie 已提交
27
 */
L
Linus Torvalds 已提交
28

29 30
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

31 32 33
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
J
Jesse Barnes 已提交
34
#include "intel_drv.h"
35
#include <drm/i915_drm.h>
L
Linus Torvalds 已提交
36
#include "i915_drv.h"
C
Chris Wilson 已提交
37
#include "i915_trace.h"
38
#include <linux/pci.h>
39
#include <linux/vgaarb.h>
40 41
#include <linux/acpi.h>
#include <linux/pnp.h>
42
#include <linux/vga_switcheroo.h>
43
#include <linux/slab.h>
44
#include <acpi/video.h>
45 46
#include <linux/pm.h>
#include <linux/pm_runtime.h>
L
Linus Torvalds 已提交
47

48 49 50 51 52 53 54 55 56
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
57
	__intel_ring_advance(LP_RING(dev_priv))
58 59 60 61 62 63 64 65 66 67 68 69

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)

70 71 72 73 74 75 76 77 78 79
/*
 * Read one dword of the legacy (DRI1) hardware status page: through the
 * CPU iomapping when a GFX HWS is required, otherwise from the render
 * ring's status page.
 */
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (!I915_NEED_GFX_HWS(dev_priv->dev))
		return intel_read_status_page(LP_RING(dev_priv), reg);

	return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
80 81 82
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21

83 84 85 86 87 88 89 90 91 92 93 94 95
/*
 * Mirror the latest breadcrumb value from the hardware status page into
 * the master's sarea, if a master with a mapped sarea exists.
 */
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_dispatch =
			READ_BREADCRUMB(dev_priv);
}

96 97 98 99 100 101 102 103 104 105 106
/*
 * Point the HWS_PGA register at the physically allocated status page.
 * On gen4+ the upper bus-address bits (32:35) are folded into bits 7:4
 * of the register value.
 */
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr = dev_priv->status_page_dmah->busaddr;

	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;

	I915_WRITE(HWS_PGA, addr);
}

107 108 109 110
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
111
static void i915_free_hws(struct drm_device *dev)
112 113
{
	drm_i915_private_t *dev_priv = dev->dev_private;
114 115
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

116 117 118 119 120
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

121 122
	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
123
		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
124 125 126 127 128 129
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

130
void i915_kernel_lost_context(struct drm_device * dev)
L
Linus Torvalds 已提交
131 132
{
	drm_i915_private_t *dev_priv = dev->dev_private;
133
	struct drm_i915_master_private *master_priv;
134
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
L
Linus Torvalds 已提交
135

J
Jesse Barnes 已提交
136 137 138 139 140 141 142
	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

143 144
	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
145
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
L
Linus Torvalds 已提交
146
	if (ring->space < 0)
147
		ring->space += ring->size;
L
Linus Torvalds 已提交
148

149 150 151 152 153 154
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
L
Linus Torvalds 已提交
155 156
}

157
static int i915_dma_cleanup(struct drm_device * dev)
L
Linus Torvalds 已提交
158
{
J
Jesse Barnes 已提交
159
	drm_i915_private_t *dev_priv = dev->dev_private;
160 161
	int i;

L
Linus Torvalds 已提交
162 163 164 165
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
166
	if (dev->irq_enabled)
D
Dave Airlie 已提交
167
		drm_irq_uninstall(dev);
L
Linus Torvalds 已提交
168

169
	mutex_lock(&dev->struct_mutex);
170 171
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
172
	mutex_unlock(&dev->struct_mutex);
173

174 175 176
	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);
L
Linus Torvalds 已提交
177 178 179 180

	return 0;
}

J
Jesse Barnes 已提交
181
/*
 * Handle I915_INIT_DMA: locate the sarea (absent for DRI2 userspace),
 * optionally create the legacy render ring, and seed the DRI1 state.
 * Returns 0 on success or a negative errno.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* A GEM-managed ring must not be re-initialized by DRI1. */
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

226
static int i915_dma_resume(struct drm_device * dev)
L
Linus Torvalds 已提交
227 228
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
229
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
L
Linus Torvalds 已提交
230

231
	DRM_DEBUG_DRIVER("%s\n", __func__);
L
Linus Torvalds 已提交
232

233
	if (ring->virtual_start == NULL) {
L
Linus Torvalds 已提交
234 235
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
E
Eric Anholt 已提交
236
		return -ENOMEM;
L
Linus Torvalds 已提交
237 238 239
	}

	/* Program Hardware Status Page */
240
	if (!ring->status_page.page_addr) {
L
Linus Torvalds 已提交
241
		DRM_ERROR("Can not find hardware status page\n");
E
Eric Anholt 已提交
242
		return -EINVAL;
L
Linus Torvalds 已提交
243
	}
244
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
245 246
				ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
247
		intel_ring_setup_status_page(ring);
248
	else
249
		i915_write_hws_pga(dev);
250

251
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
L
Linus Torvalds 已提交
252 253 254 255

	return 0;
}

256 257
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
L
Linus Torvalds 已提交
258
{
259
	drm_i915_init_t *init = data;
L
Linus Torvalds 已提交
260 261
	int retcode = 0;

262 263 264
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

265
	switch (init->func) {
L
Linus Torvalds 已提交
266
	case I915_INIT_DMA:
J
Jesse Barnes 已提交
267
		retcode = i915_initialize(dev, init);
L
Linus Torvalds 已提交
268 269 270 271 272
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
D
Dave Airlie 已提交
273
		retcode = i915_dma_resume(dev);
L
Linus Torvalds 已提交
274 275
		break;
	default:
E
Eric Anholt 已提交
276
		retcode = -EINVAL;
L
Linus Torvalds 已提交
277 278 279 280 281 282 283 284 285 286 287 288 289 290 291
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	int sub;

	switch ((cmd >> 29) & 0x7) {
	case 0x0:
		sub = (cmd >> 23) & 0x3f;
		if (sub == 0x0 || sub == 0x4)
			return 1;	/* MI_NOOP / MI_FLUSH */
		return 0;		/* disallow everything else */
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		sub = (cmd >> 24) & 0x1f;
		if (sub <= 0x18 || sub == 0x1c)
			return 1;
		if (sub == 0x1d) {
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		}
		if (sub == 0x1e)
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		if (sub == 0x1f) {
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {		/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;	/* indirect sequential */
		}
		return 0;
	default:
		return 0;
	}
}

350
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
L
Linus Torvalds 已提交
351 352
{
	drm_i915_private_t *dev_priv = dev->dev_private;
353
	int i, ret;
L
Linus Torvalds 已提交
354

355
	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
E
Eric Anholt 已提交
356
		return -EINVAL;
357

L
Linus Torvalds 已提交
358
	for (i = 0; i < dwords;) {
359 360
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
E
Eric Anholt 已提交
361
			return -EINVAL;
362
		i += sz;
L
Linus Torvalds 已提交
363 364
	}

365 366 367 368 369 370
	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
371 372 373 374 375
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

L
Linus Torvalds 已提交
376 377 378
	return 0;
}

379 380
int
i915_emit_box(struct drm_device *dev,
381 382
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
L
Linus Torvalds 已提交
383
{
384 385
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
L
Linus Torvalds 已提交
386

387 388
	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
L
Linus Torvalds 已提交
389
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
390
			  box->x1, box->y1, box->x2, box->y2);
E
Eric Anholt 已提交
391
		return -EINVAL;
L
Linus Torvalds 已提交
392 393
	}

394
	if (INTEL_INFO(dev)->gen >= 4) {
395 396 397 398
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

399
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
400 401
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
402 403
		OUT_RING(DR4);
	} else {
404 405 406 407
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

408 409
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
410 411
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
412 413 414
		OUT_RING(DR4);
		OUT_RING(0);
	}
415
	ADVANCE_LP_RING();
L
Linus Torvalds 已提交
416 417 418 419

	return 0;
}

420 421 422 423
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

424
static void i915_emit_breadcrumb(struct drm_device *dev)
425 426
{
	drm_i915_private_t *dev_priv = dev->dev_private;
427
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
428

429 430 431
	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
432
	if (master_priv->sarea_priv)
433
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
434

435 436 437
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
438
		OUT_RING(dev_priv->dri1.counter);
439 440 441
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
442 443
}

444
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
445 446 447
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
L
Linus Torvalds 已提交
448 449 450 451 452 453
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
E
Eric Anholt 已提交
454
		return -EINVAL;
L
Linus Torvalds 已提交
455 456 457 458 459 460 461 462
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
463
			ret = i915_emit_box(dev, &cliprects[i],
L
Linus Torvalds 已提交
464 465 466 467 468
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

469
		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
L
Linus Torvalds 已提交
470 471 472 473
		if (ret)
			return ret;
	}

474
	i915_emit_breadcrumb(dev);
L
Linus Torvalds 已提交
475 476 477
	return 0;
}

478
static int i915_dispatch_batchbuffer(struct drm_device * dev,
479 480
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
L
Linus Torvalds 已提交
481
{
482
	struct drm_i915_private *dev_priv = dev->dev_private;
L
Linus Torvalds 已提交
483
	int nbox = batch->num_cliprects;
484
	int i, count, ret;
L
Linus Torvalds 已提交
485 486 487

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
E
Eric Anholt 已提交
488
		return -EINVAL;
L
Linus Torvalds 已提交
489 490 491 492 493 494 495
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
496
			ret = i915_emit_box(dev, &cliprects[i],
497
					    batch->DR1, batch->DR4);
L
Linus Torvalds 已提交
498 499 500 501
			if (ret)
				return ret;
		}

502
		if (!IS_I830(dev) && !IS_845G(dev)) {
503 504 505 506
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

507
			if (INTEL_INFO(dev)->gen >= 4) {
508 509 510 511 512 513
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
L
Linus Torvalds 已提交
514
		} else {
515 516 517 518
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

L
Linus Torvalds 已提交
519 520 521 522 523
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
524
		ADVANCE_LP_RING();
L
Linus Torvalds 已提交
525 526
	}

527

528
	if (IS_G4X(dev) || IS_GEN5(dev)) {
529 530 531 532 533
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
534
	}
L
Linus Torvalds 已提交
535

536
	i915_emit_breadcrumb(dev);
L
Linus Torvalds 已提交
537 538 539
	return 0;
}

540
static int i915_dispatch_flip(struct drm_device * dev)
L
Linus Torvalds 已提交
541 542
{
	drm_i915_private_t *dev_priv = dev->dev_private;
543 544
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
545
	int ret;
L
Linus Torvalds 已提交
546

547
	if (!master_priv->sarea_priv)
548 549
		return -EINVAL;

550
	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
551
			  __func__,
552
			 dev_priv->dri1.current_page,
553
			 master_priv->sarea_priv->pf_current_page);
L
Linus Torvalds 已提交
554

555 556
	i915_kernel_lost_context(dev);

557 558 559 560
	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

561
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
562
	OUT_RING(0);
L
Linus Torvalds 已提交
563

564 565
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
566 567 568
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
L
Linus Torvalds 已提交
569
	} else {
570 571
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
L
Linus Torvalds 已提交
572
	}
573
	OUT_RING(0);
L
Linus Torvalds 已提交
574

575 576
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
577

578
	ADVANCE_LP_RING();
L
Linus Torvalds 已提交
579

580
	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
L
Linus Torvalds 已提交
581

582 583 584
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
585
		OUT_RING(dev_priv->dri1.counter);
586 587 588
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
L
Linus Torvalds 已提交
589

590
	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
591
	return 0;
L
Linus Torvalds 已提交
592 593
}

594
static int i915_quiescent(struct drm_device *dev)
L
Linus Torvalds 已提交
595 596
{
	i915_kernel_lost_context(dev);
597
	return intel_ring_idle(LP_RING(dev->dev_private));
L
Linus Torvalds 已提交
598 599
}

600 601
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
L
Linus Torvalds 已提交
602
{
603 604
	int ret;

605 606 607
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

608
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
L
Linus Torvalds 已提交
609

610 611 612 613 614
	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
L
Linus Torvalds 已提交
615 616
}

617 618
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
L
Linus Torvalds 已提交
619 620
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
621
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
L
Linus Torvalds 已提交
622
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
623
	    master_priv->sarea_priv;
624
	drm_i915_batchbuffer_t *batch = data;
L
Linus Torvalds 已提交
625
	int ret;
626
	struct drm_clip_rect *cliprects = NULL;
L
Linus Torvalds 已提交
627

628 629 630
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

631
	if (!dev_priv->dri1.allow_batchbuffer) {
L
Linus Torvalds 已提交
632
		DRM_ERROR("Batchbuffer ioctl disabled\n");
E
Eric Anholt 已提交
633
		return -EINVAL;
L
Linus Torvalds 已提交
634 635
	}

636
	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
637
			batch->start, batch->used, batch->num_cliprects);
L
Linus Torvalds 已提交
638

639
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
L
Linus Torvalds 已提交
640

641 642 643 644
	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
645
		cliprects = kcalloc(batch->num_cliprects,
646
				    sizeof(*cliprects),
647
				    GFP_KERNEL);
648 649 650 651 652 653
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
654 655
		if (ret != 0) {
			ret = -EFAULT;
656
			goto fail_free;
657
		}
658
	}
L
Linus Torvalds 已提交
659

660
	mutex_lock(&dev->struct_mutex);
661
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
662
	mutex_unlock(&dev->struct_mutex);
L
Linus Torvalds 已提交
663

664
	if (sarea_priv)
665
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
666 667

fail_free:
668
	kfree(cliprects);
669

L
Linus Torvalds 已提交
670 671 672
	return ret;
}

673 674
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
L
Linus Torvalds 已提交
675 676
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
677
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
L
Linus Torvalds 已提交
678
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
679
	    master_priv->sarea_priv;
680
	drm_i915_cmdbuffer_t *cmdbuf = data;
681 682
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
L
Linus Torvalds 已提交
683 684
	int ret;

685
	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
686
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
L
Linus Torvalds 已提交
687

688 689 690
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

691
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
L
Linus Torvalds 已提交
692

693 694 695
	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

696
	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
697 698 699 700
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
701 702
	if (ret != 0) {
		ret = -EFAULT;
703
		goto fail_batch_free;
704
	}
705 706

	if (cmdbuf->num_cliprects) {
707
		cliprects = kcalloc(cmdbuf->num_cliprects,
708
				    sizeof(*cliprects), GFP_KERNEL);
709 710
		if (cliprects == NULL) {
			ret = -ENOMEM;
711
			goto fail_batch_free;
712
		}
713 714 715 716

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
717 718
		if (ret != 0) {
			ret = -EFAULT;
719
			goto fail_clip_free;
720
		}
L
Linus Torvalds 已提交
721 722
	}

723
	mutex_lock(&dev->struct_mutex);
724
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
725
	mutex_unlock(&dev->struct_mutex);
L
Linus Torvalds 已提交
726 727
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
728
		goto fail_clip_free;
L
Linus Torvalds 已提交
729 730
	}

731
	if (sarea_priv)
732
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
733 734

fail_clip_free:
735
	kfree(cliprects);
736
fail_batch_free:
737
	kfree(batch_data);
738 739

	return ret;
L
Linus Torvalds 已提交
740 741
}

742 743 744 745 746 747 748 749 750
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

751 752 753
	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
754
	if (master_priv->sarea_priv)
755
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
756 757 758 759

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
760
		OUT_RING(dev_priv->dri1.counter);
761 762 763 764
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

765
	return dev_priv->dri1.counter;
766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
796
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Hand the emitted sequence number back to userspace. */
	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893
/* Report which vblank pipes exist; always pipes A and B on this path. */
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

894 895
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
L
Linus Torvalds 已提交
896
{
897 898
	int ret;

899 900 901
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

902
	DRM_DEBUG_DRIVER("%s\n", __func__);
L
Linus Torvalds 已提交
903

904
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
L
Linus Torvalds 已提交
905

906 907 908 909 910
	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
L
Linus Torvalds 已提交
911 912
}

913 914
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
L
Linus Torvalds 已提交
915 916
{
	drm_i915_private_t *dev_priv = dev->dev_private;
917
	drm_i915_getparam_t *param = data;
L
Linus Torvalds 已提交
918 919 920
	int value;

	if (!dev_priv) {
921
		DRM_ERROR("called with no initialization\n");
E
Eric Anholt 已提交
922
		return -EINVAL;
L
Linus Torvalds 已提交
923 924
	}

925
	switch (param->param) {
L
Linus Torvalds 已提交
926
	case I915_PARAM_IRQ_ACTIVE:
927
		value = dev->pdev->irq ? 1 : 0;
L
Linus Torvalds 已提交
928 929
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
930
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
L
Linus Torvalds 已提交
931
		break;
D
Dave Airlie 已提交
932 933 934
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
K
Kristian Høgsberg 已提交
935
	case I915_PARAM_CHIPSET_ID:
936
		value = dev->pdev->device;
K
Kristian Høgsberg 已提交
937
		break;
938
	case I915_PARAM_HAS_GEM:
939
		value = 1;
940
		break;
941 942 943
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
944 945 946
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
947 948 949
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
J
Jesse Barnes 已提交
950 951
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
952
		value = 1;
J
Jesse Barnes 已提交
953
		break;
954
	case I915_PARAM_HAS_BSD:
955
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
956
		break;
957
	case I915_PARAM_HAS_BLT:
958
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
959
		break;
960 961 962
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
963 964 965
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
966 967 968
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
969 970 971
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
972 973 974
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
975 976 977
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
978 979 980
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
981 982 983
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
984 985 986
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
987 988 989
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
990 991 992
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
993 994 995
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
996 997 998
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
999 1000 1001
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
1002 1003 1004
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
1005 1006 1007
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
L
Linus Torvalds 已提交
1008
	default:
1009
		DRM_DEBUG("Unknown parameter %d\n", param->param);
E
Eric Anholt 已提交
1010
		return -EINVAL;
L
Linus Torvalds 已提交
1011 1012
	}

1013
	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
L
Linus Torvalds 已提交
1014
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
E
Eric Anholt 已提交
1015
		return -EFAULT;
L
Linus Torvalds 已提交
1016 1017 1018 1019 1020
	}

	return 0;
}

1021 1022
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
L
Linus Torvalds 已提交
1023 1024
{
	drm_i915_private_t *dev_priv = dev->dev_private;
1025
	drm_i915_setparam_t *param = data;
L
Linus Torvalds 已提交
1026 1027

	if (!dev_priv) {
1028
		DRM_ERROR("called with no initialization\n");
E
Eric Anholt 已提交
1029
		return -EINVAL;
L
Linus Torvalds 已提交
1030 1031
	}

1032
	switch (param->param) {
L
Linus Torvalds 已提交
1033 1034 1035 1036 1037
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
1038
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
L
Linus Torvalds 已提交
1039
		break;
1040 1041 1042 1043 1044 1045 1046
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
L
Linus Torvalds 已提交
1047
	default:
1048
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
1049
					param->param);
E
Eric Anholt 已提交
1050
		return -EINVAL;
L
Linus Torvalds 已提交
1051 1052 1053 1054 1055
	}

	return 0;
}

1056 1057
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
1058 1059
{
	drm_i915_private_t *dev_priv = dev->dev_private;
1060
	drm_i915_hws_addr_t *hws = data;
1061
	struct intel_ring_buffer *ring;
1062

1063 1064 1065
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

1066 1067
	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;
1068 1069

	if (!dev_priv) {
1070
		DRM_ERROR("called with no initialization\n");
E
Eric Anholt 已提交
1071
		return -EINVAL;
1072 1073
	}

J
Jesse Barnes 已提交
1074 1075 1076 1077 1078
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

1079
	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1080

1081
	ring = LP_RING(dev_priv);
1082
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1083

1084
	dev_priv->dri1.gfx_hws_cpu_addr =
B
Ben Widawsky 已提交
1085
		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
1086
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1087
		i915_dma_cleanup(dev);
1088
		ring->status_page.gfx_addr = 0;
1089 1090
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
E
Eric Anholt 已提交
1091
		return -ENOMEM;
1092
	}
1093 1094

	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1095
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
1096

1097
	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
1098
			 ring->status_page.gfx_addr);
1099
	DRM_DEBUG_DRIVER("load hws at %p\n",
1100
			 ring->status_page.page_addr);
1101 1102 1103
	return 0;
}

1104 1105 1106 1107
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

1108
	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
1109 1110 1111 1112 1113 1114 1115
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
1128
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1129 1130
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
1131
	int ret;
1132

1133
	if (INTEL_INFO(dev)->gen >= 4)
1134 1135 1136 1137 1138 1139 1140
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
1141 1142
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
1143 1144 1145
#endif

	/* Get some space for it */
1146 1147 1148 1149
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
1150 1151
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
1152
				     0, pcibios_align_resource,
1153 1154 1155 1156
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
1157
		return ret;
1158 1159
	}

1160
	if (INTEL_INFO(dev)->gen >= 4)
1161 1162 1163 1164 1165
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
1166
	return 0;
1167 1168 1169 1170 1171 1172 1173
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
1174
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
1211
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);

	/* When decoding, claim the legacy VGA ranges too. */
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

1243 1244 1245 1246 1247
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
1248
		pr_info("switched on\n");
1249
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1250 1251 1252
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
1253
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1254
	} else {
1255
		pr_err("switched off\n");
1256
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1257
		i915_suspend(dev, pmm);
1258
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272
	}
}

/* vga_switcheroo callback: a switch is only safe with no open clients. */
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool idle;

	spin_lock(&dev->count_lock);
	idle = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);

	return idle;
}

1273 1274 1275 1276 1277 1278
/* Hooks handed to vga_switcheroo for hybrid-graphics switching; no
 * reprobe callback is needed, hence .reprobe = NULL. */
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

1279 1280 1281 1282
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
J
Jesse Barnes 已提交
1283

1284
	ret = intel_parse_bios(dev);
J
Jesse Barnes 已提交
1285 1286 1287
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

1288 1289 1290 1291 1292 1293 1294
	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
1295 1296 1297
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;
1298

J
Jesse Barnes 已提交
1299 1300
	intel_register_dsm_handler();

1301
	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
1302
	if (ret)
1303
		goto cleanup_vga_client;
1304

1305 1306 1307 1308 1309 1310 1311
	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

1312 1313 1314 1315
	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem_stolen;

1316
	intel_power_domains_init_hw(dev);
1317

1318 1319
	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
1320 1321
	intel_modeset_init(dev);

1322
	ret = i915_gem_init(dev);
J
Jesse Barnes 已提交
1323
	if (ret)
1324
		goto cleanup_power;
1325

1326 1327
	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);

1328
	intel_modeset_gem_init(dev);
1329

J
Jesse Barnes 已提交
1330 1331
	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
1332
	dev->vblank_disable_allowed = true;
1333 1334
	if (INTEL_INFO(dev)->num_pipes == 0) {
		intel_display_power_put(dev, POWER_DOMAIN_VGA);
B
Ben Widawsky 已提交
1335
		return 0;
1336
	}
J
Jesse Barnes 已提交
1337

1338 1339
	ret = intel_fbdev_init(dev);
	if (ret)
1340 1341
		goto cleanup_gem;

1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356
	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fdbev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will loose hotplug notifactions.
	 */
	intel_fbdev_initial_config(dev);

1357 1358
	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;
1359

1360
	drm_kms_helper_poll_init(dev);
1361

J
Jesse Barnes 已提交
1362 1363
	return 0;

1364 1365 1366
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
1367
	i915_gem_context_fini(dev);
1368
	mutex_unlock(&dev->struct_mutex);
1369
	i915_gem_cleanup_aliasing_ppgtt(dev);
1370
	drm_mm_takedown(&dev_priv->gtt.base.mm);
1371 1372
cleanup_power:
	intel_display_power_put(dev, POWER_DOMAIN_VGA);
1373
	drm_irq_uninstall(dev);
1374 1375
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
1376 1377 1378 1379
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
J
Jesse Barnes 已提交
1380 1381 1382 1383
out:
	return ret;
}

1384 1385 1386 1387
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

1388
	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

1403
	kfree(master_priv);
1404 1405 1406 1407

	master->driver_priv = NULL;
}

1408
#ifdef CONFIG_DRM_I915_FBDEV
/* Evict any firmware framebuffer (vesafb/efifb) that sits on our
 * mappable GTT aperture before we take it over. */
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	/* A shadowed ROM marks us as the primary (boot) VGA device. */
	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);
}
#else
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
}
#endif
1434

D
Daniel Vetter 已提交
1435 1436 1437 1438
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = dev_priv->info;

1439 1440
#define PRINT_S(name) "%s"
#define SEP_EMPTY
1441 1442
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
D
Daniel Vetter 已提交
1443
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
1444
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
D
Daniel Vetter 已提交
1445 1446
			 info->gen,
			 dev_priv->dev->pdev->device,
1447
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
1448 1449
#undef PRINT_S
#undef SEP_EMPTY
1450 1451
#undef PRINT_FLAG
#undef SEP_COMMA
D
Daniel Vetter 已提交
1452 1453
}

J
Jesse Barnes 已提交
1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
1465
int i915_driver_load(struct drm_device *dev, unsigned long flags)
1466
{
1467
	struct drm_i915_private *dev_priv;
1468
	struct intel_device_info *info;
1469
	int ret = 0, mmio_bar, mmio_size;
1470
	uint32_t aperture_size;
1471

1472 1473 1474
	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
1475 1476 1477
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
1478
		return -ENODEV;
1479
	}
1480

1481
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
J
Jesse Barnes 已提交
1482 1483 1484 1485
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
1486
	dev_priv->dev = dev;
1487
	dev_priv->info = info;
J
Jesse Barnes 已提交
1488

1489 1490
	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
1491
	spin_lock_init(&dev_priv->backlight_lock);
1492
	spin_lock_init(&dev_priv->uncore.lock);
1493
	spin_lock_init(&dev_priv->mm.object_stat_lock);
1494 1495 1496 1497
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->rps.hw_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

1498 1499 1500 1501 1502 1503 1504 1505
	mutex_init(&dev_priv->pc8.lock);
	dev_priv->pc8.requirements_met = false;
	dev_priv->pc8.gpu_idle = false;
	dev_priv->pc8.irqs_disabled = false;
	dev_priv->pc8.enabled = false;
	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);

1506 1507
	intel_display_crc_init(dev);

D
Daniel Vetter 已提交
1508 1509
	i915_dump_device_info(dev_priv);

1510 1511 1512 1513 1514 1515 1516 1517
	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

1518 1519 1520 1521 1522
	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542
	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

1543
	intel_uncore_early_sanitize(dev);
1544

1545 1546 1547 1548 1549
	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

1550 1551
	ret = i915_gem_gtt_init(dev);
	if (ret)
1552
		goto out_regs;
1553

1554 1555
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kick_out_firmware_fb(dev_priv);
1556

1557 1558
	pci_set_master(dev->pdev);

1559 1560 1561 1562
	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

1574
	aperture_size = dev_priv->gtt.mappable_end;
1575

B
Ben Widawsky 已提交
1576 1577
	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
1578
				     aperture_size);
B
Ben Widawsky 已提交
1579
	if (dev_priv->gtt.mappable == NULL) {
1580
		ret = -EIO;
1581
		goto out_gtt;
1582 1583
	}

1584 1585
	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);
1586

1587 1588 1589 1590 1591 1592 1593
	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
1594
	 * idle-timers and recording error state.
1595 1596 1597
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
1598
	 * workqueue at any time.  Use an ordered one.
1599
	 */
1600
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1601 1602 1603
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
1604
		goto out_mtrrfree;
1605 1606
	}

1607
	intel_irq_init(dev);
1608
	intel_pm_init(dev);
1609
	intel_uncore_sanitize(dev);
1610

1611 1612
	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
1613
	intel_setup_gmbus(dev);
1614
	intel_opregion_setup(dev);
1615

1616 1617
	intel_setup_bios(dev);

1618 1619
	i915_gem_load(dev);

1620 1621 1622 1623 1624 1625
	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
1626 1627
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
1628 1629
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
1630
	 */
1631
	if (!IS_I945G(dev) && !IS_I945GM(dev))
1632
		pci_enable_msi(dev->pdev);
1633

1634 1635 1636 1637
	dev_priv->num_plane = 1;
	if (IS_VALLEYVIEW(dev))
		dev_priv->num_plane = 2;

B
Ben Widawsky 已提交
1638 1639 1640 1641 1642
	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}
1643

1644
	intel_power_domains_init(dev);
1645

J
Jesse Barnes 已提交
1646
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
D
Daniel Vetter 已提交
1647
		ret = i915_load_modeset_init(dev);
J
Jesse Barnes 已提交
1648 1649
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
1650
			goto out_power_well;
J
Jesse Barnes 已提交
1651
		}
1652 1653 1654
	} else {
		/* Start out suspended in ums mode. */
		dev_priv->ums.mm_suspended = 1;
J
Jesse Barnes 已提交
1655 1656
	}

B
Ben Widawsky 已提交
1657 1658
	i915_setup_sysfs(dev);

B
Ben Widawsky 已提交
1659 1660 1661
	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
1662
		acpi_video_register();
B
Ben Widawsky 已提交
1663
	}
1664

1665 1666
	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);
1667

1668 1669
	intel_init_runtime_pm(dev_priv);

J
Jesse Barnes 已提交
1670 1671
	return 0;

1672
out_power_well:
1673
	intel_power_domains_remove(dev);
1674
	drm_vblank_cleanup(dev);
1675
out_gem_unload:
1676
	if (dev_priv->mm.inactive_shrinker.scan_objects)
1677 1678
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

1679 1680 1681 1682 1683
	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
1684
	destroy_workqueue(dev_priv->wq);
1685
out_mtrrfree:
1686
	arch_phys_wc_del(dev_priv->gtt.mtrr);
B
Ben Widawsky 已提交
1687
	io_mapping_free(dev_priv->gtt.mappable);
1688 1689 1690
out_gtt:
	list_del(&dev_priv->gtt.base.global_link);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
1691
	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1692
out_regs:
1693
	intel_uncore_fini(dev);
1694
	pci_iounmap(dev->pdev, dev_priv->regs);
1695 1696
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
J
Jesse Barnes 已提交
1697
free_priv:
1698 1699
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
1700
	kfree(dev_priv);
J
Jesse Barnes 已提交
1701 1702 1703 1704 1705 1706
	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
1707
	int ret;
J
Jesse Barnes 已提交
1708

1709 1710 1711 1712 1713 1714
	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

1715 1716
	intel_fini_runtime_pm(dev_priv);

1717
	intel_gpu_ips_teardown();
1718

1719 1720 1721 1722 1723
	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev, true);
	intel_power_domains_remove(dev);
1724

B
Ben Widawsky 已提交
1725 1726
	i915_teardown_sysfs(dev);

1727
	if (dev_priv->mm.inactive_shrinker.scan_objects)
1728 1729
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

B
Ben Widawsky 已提交
1730
	io_mapping_free(dev_priv->gtt.mappable);
1731
	arch_phys_wc_del(dev_priv->gtt.mtrr);
1732

1733 1734
	acpi_video_unregister();

J
Jesse Barnes 已提交
1735
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1736
		intel_fbdev_fini(dev);
1737
		intel_modeset_cleanup(dev);
1738
		cancel_work_sync(&dev_priv->console_resume_work);
1739

Z
Zhao Yakui 已提交
1740 1741 1742 1743
		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
1744 1745 1746 1747
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
Z
Zhao Yakui 已提交
1748
		}
1749

1750
		vga_switcheroo_unregister_client(dev->pdev);
1751
		vga_client_register(dev->pdev, NULL, NULL, NULL);
J
Jesse Barnes 已提交
1752 1753
	}

1754
	/* Free error state after interrupts are fully disabled. */
1755 1756
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
1757
	i915_destroy_error_state(dev);
1758

1759 1760
	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);

1761 1762 1763
	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

1764
	intel_opregion_fini(dev);
1765

J
Jesse Barnes 已提交
1766
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1767 1768 1769
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

J
Jesse Barnes 已提交
1770
		mutex_lock(&dev->struct_mutex);
1771
		i915_gem_free_all_phys_object(dev);
J
Jesse Barnes 已提交
1772
		i915_gem_cleanup_ringbuffer(dev);
1773
		i915_gem_context_fini(dev);
J
Jesse Barnes 已提交
1774
		mutex_unlock(&dev->struct_mutex);
1775
		i915_gem_cleanup_aliasing_ppgtt(dev);
1776
		i915_gem_cleanup_stolen(dev);
1777 1778 1779

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
J
Jesse Barnes 已提交
1780 1781
	}

1782 1783
	list_del(&dev_priv->gtt.base.global_link);
	WARN_ON(!list_empty(&dev_priv->vm_list));
D
Daniel Vetter 已提交
1784

1785 1786
	drm_vblank_cleanup(dev);

1787
	intel_teardown_gmbus(dev);
1788 1789
	intel_teardown_mchbar(dev);

1790
	destroy_workqueue(dev_priv->wq);
1791
	pm_qos_remove_request(&dev_priv->pm_qos);
1792

1793
	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1794

1795 1796 1797 1798
	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

1799 1800
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
1801

1802
	pci_dev_put(dev_priv->bridge_dev);
1803
	kfree(dev->dev_private);
J
Jesse Barnes 已提交
1804

1805 1806 1807
	return 0;
}

1808
/* Per-file open hook: all per-client state lives in GEM, so simply
 * forward to i915_gem_open() and propagate its result. */
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	return i915_gem_open(dev, file);
}

J
Jesse Barnes 已提交
1819 1820 1821 1822 1823 1824 1825 1826
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
1827
 * Additionally, in the non-mode setting case, we'll tear down the GTT
J
Jesse Barnes 已提交
1828 1829 1830
 * and DMA structures, since the kernel won't be using them, and clea
 * up any GEM state.
 */
1831
void i915_driver_lastclose(struct drm_device * dev)
L
Linus Torvalds 已提交
1832
{
J
Jesse Barnes 已提交
1833 1834
	drm_i915_private_t *dev_priv = dev->dev_private;

1835 1836 1837 1838 1839 1840 1841
	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1842
		intel_fbdev_restore_mode(dev);
1843
		vga_switcheroo_process_delayed_switch();
D
Dave Airlie 已提交
1844
		return;
J
Jesse Barnes 已提交
1845
	}
D
Dave Airlie 已提交
1846

1847 1848
	i915_gem_lastclose(dev);

D
Dave Airlie 已提交
1849
	i915_dma_cleanup(dev);
L
Linus Torvalds 已提交
1850 1851
}

1852
/* Per-file pre-close hook: release this client's contexts and any GEM
 * state while the file is still valid. */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}

1858
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1859
{
1860
	struct drm_i915_file_private *file_priv = file->driver_priv;
1861

1862
	kfree(file_priv);
1863 1864
}

R
Rob Clark 已提交
1865
const struct drm_ioctl_desc i915_ioctls[] = {
1866 1867 1868 1869 1870 1871
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1872
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
1873
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
D
Daniel Vetter 已提交
1874 1875 1876
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1877
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
D
Daniel Vetter 已提交
1878
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1879
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1880 1881 1882 1883 1884
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
1885
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1886 1887
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1888 1889 1890 1891
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1892 1893
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1894 1895 1896 1897 1898 1899 1900 1901 1902 1903
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1904
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1905
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1906 1907
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1908 1909
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1910 1911 1912 1913
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1914
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
D
Dave Airlie 已提交
1915 1916 1917
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1918

1919 1920 1921 1922
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  For
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	/* Unconditionally claim AGP; see comment above. */
	return 1;
}