/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
33
#include "i915_drm.h"
34
#include "i915_trace.h"
35
#include "intel_drv.h"
36

37 38 39 40 41 42 43 44 45 46 47 48 49 50
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

/*
 * Emit an MI_FLUSH on the render ring, selecting the flush bits from the
 * GEM domains being invalidated/flushed.  Emits nothing when no GPU
 * domain is involved.
 */
static void
render_ring_flush(struct drm_device *dev,
		  struct intel_ring_buffer *ring,
		  u32	invalidate_domains,
		  u32	flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		  invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif

		/* pad with MI_NOOP to keep the tail qword-aligned */
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, cmd);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
}
122 123 124
static void ring_set_tail(struct drm_device *dev,
			  struct intel_ring_buffer *ring,
			  u32 value)
125 126
{
	drm_i915_private_t *dev_priv = dev->dev_private;
127
	I915_WRITE_TAIL(ring, ring->tail);
128 129
}

130
static unsigned int render_ring_get_active_head(struct drm_device *dev,
131
						struct intel_ring_buffer *ring)
132 133
{
	drm_i915_private_t *dev_priv = dev->dev_private;
D
Daniel Vetter 已提交
134
	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD;
135 136 137 138 139

	return I915_READ(acthd_reg);
}

/*
 * Common ring initialization: stop the ring, program its start address,
 * force the head to zero (working around broken G45 head reset), and
 * enable the ring.
 *
 * Returns 0 on success, -EIO if the ring refuses to come up.
 */
static int init_ring_common(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	u32 head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	obj_priv = to_intel_bo(ring->gem_object);

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->set_tail(dev, ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj_priv->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		DRM_ERROR("%s head forced to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
	}

	/* enable the ring; length field counts whole pages */
	I915_WRITE_CTL(ring,
			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_NO_REPORT | RING_VALID);

	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		/* mirror hardware head/tail into the software bookkeeping */
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return 0;
}
/*
 * Render-ring specific initialization on top of init_ring_common():
 * program MI_MODE on Gen4+ parts.
 */
static int init_render_ring(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = init_ring_common(dev, ring);
	int mode;

	if (INTEL_INFO(dev)->gen > 3) {
		/* masked-write format: high 16 bits select which low bits
		 * the write affects */
		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}
	return ret;
}
/*
 * Emit a depth-stalling PIPE_CONTROL that performs a qword write to the
 * given scratch address (through the global GTT), used to flush pending
 * PIPE_CONTROL writes out to memory.
 */
#define PIPE_CONTROL_FLUSH(addr)					\
do {									\
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
	OUT_RING(0);							\
	OUT_RING(0);							\
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
239 240
static u32
render_ring_add_request(struct drm_device *dev,
241 242
			struct intel_ring_buffer *ring,
			u32 flush_domains)
243 244
{
	drm_i915_private_t *dev_priv = dev->dev_private;
245 246 247
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);
248 249 250 251 252 253 254 255 256 257 258 259 260

	if (IS_GEN6(dev)) {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
		OUT_RING(PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else if (HAS_PIPE_CONTROL(dev)) {
261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303
		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

		/*
		 * Workaround qword write incoherence by flushing the
		 * PIPE_NOTIFY buffers out to memory before requesting
		 * an interrupt.
		 */
		BEGIN_LP_RING(32);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128; /* write to separate cachelines */
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(4);
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(seqno);

		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}
	return seqno;
}

304
static u32
305 306
render_ring_get_seqno(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
307 308 309 310 311 312 313 314 315 316
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

/*
 * Take a reference on the render ring's user interrupt, enabling it in
 * hardware on the 0 -> 1 transition.  Serialized by user_irq_lock.
 */
static void
render_ring_get_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		/* PCH-split parts route the notify through the GT IRQ mask */
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
/*
 * Drop a reference on the render ring's user interrupt, disabling it in
 * hardware on the 1 -> 0 transition.  Serialized by user_irq_lock.
 */
static void
render_ring_put_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	/* catch unbalanced put while interrupts are live */
	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
350
static void render_setup_status_page(struct drm_device *dev,
351
				     struct	intel_ring_buffer *ring)
352 353 354 355 356 357 358 359 360 361 362 363
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (IS_GEN6(dev)) {
		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA_GEN6); /* posting read */
	} else {
		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA); /* posting read */
	}

}

/*
 * Flush the BSD ring: a plain MI_FLUSH (padded with MI_NOOP) is all
 * this engine needs; the domain arguments are unused here.
 */
static void
bsd_ring_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32     invalidate_domains,
		u32     flush_domains)
{
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);
}
/* Read the BSD ring's active head register. */
static unsigned int bsd_ring_get_active_head(struct drm_device *dev,
					     struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_ACTHD);
}
/* The BSD ring needs nothing beyond the common initialization. */
static int init_bsd_ring(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	return init_ring_common(dev, ring);
}
/*
 * Emit a request on the BSD ring: store the new seqno into the hardware
 * status page and raise a user interrupt.  Returns the seqno emitted.
 */
static u32
bsd_ring_add_request(struct drm_device *dev,
		     struct intel_ring_buffer *ring,
		     u32 flush_domains)
{
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

	return seqno;
}
/* Program the pre-Gen6 BSD status page address (with posting read). */
static void bsd_setup_status_page(struct drm_device *dev,
				  struct  intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
	I915_READ(BSD_HWS_PGA);
}
/* The BSD ring has no user interrupt on these parts; nothing to enable. */
static void
bsd_ring_get_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}
/* Counterpart of bsd_ring_get_user_irq(); likewise a no-op. */
static void
bsd_ring_put_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}
/* The BSD ring always reports its seqno via the hardware status page. */
static u32
bsd_ring_get_seqno(struct drm_device *dev,
		   struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
/*
 * Dispatch a batch buffer on the BSD ring.  Clip rectangles do not apply
 * to this engine, so a single MI_BATCH_BUFFER_START suffices.
 */
static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				 struct intel_ring_buffer *ring,
				 struct drm_i915_gem_execbuffer2 *exec,
				 struct drm_clip_rect *cliprects,
				 uint64_t exec_offset)
{
	uint32_t exec_start;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
			(2 << 6) | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);
	return 0;
}
/*
 * Dispatch a batch buffer on the render ring, once per clip rectangle
 * (or once when there are none).  G4x/Ironlake get a trailing
 * MI_FLUSH with ISP invalidate appended afterwards.
 */
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_execbuffer2 *exec,
				    struct drm_clip_rect *cliprects,
				    uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			/* 830/845 use the older MI_BATCH_BUFFER command,
			 * which takes an explicit end address */
			intel_ring_begin(dev, ring, 4);
			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
			intel_ring_emit(dev, ring,
					exec_start | MI_BATCH_NON_SECURE);
			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
			intel_ring_emit(dev, ring, 0);
		} else {
			intel_ring_begin(dev, ring, 4);
			if (INTEL_INFO(dev)->gen >= 4) {
				intel_ring_emit(dev, ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
				intel_ring_emit(dev, ring, exec_start);
			} else {
				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
						| (2 << 6));
				intel_ring_emit(dev, ring, exec_start |
						MI_BATCH_NON_SECURE);
			}
		}
		intel_ring_advance(dev, ring);
	}

	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, MI_FLUSH |
				MI_NO_WRITE_FLUSH |
				MI_INVALIDATE_ISP );
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
	/* XXX breadcrumb */

	return 0;
}
/*
 * Unmap, unpin and release the ring's hardware status page object, if
 * one was set up; no-op otherwise.
 */
static void cleanup_status_page(struct drm_device *dev,
				struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;
	obj_priv = to_intel_bo(obj);

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
540
static int init_status_page(struct drm_device *dev,
541
			    struct intel_ring_buffer *ring)
542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		goto err_unref;
	}

562 563 564
	ring->status_page.gfx_addr = obj_priv->gtt_offset;
	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
	if (ring->status_page.page_addr == NULL) {
565 566 567
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
568 569
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
570

571 572 573
	ring->setup_status_page(dev, ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);
574 575 576 577 578 579 580 581

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
582
	return ret;
583 584
}

/*
 * Allocate, pin and map a ring buffer object, set up its (optional)
 * hardware status page, and run the ring's init hook.
 *
 * Returns 0 on success; on failure every partially-acquired resource
 * is unwound via the goto chain at the bottom.
 */
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	int ret;

	ring->dev = dev;

	/* chipsets without a physical status page need a GTT-backed one */
	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(dev, ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->gem_object = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE);
	if (ret)
		goto err_unref;

	/* map the ring through the aperture, write-combined */
	obj_priv = to_intel_bo(obj);
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(dev, ring);
	if (ret)
		goto err_unmap;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		/* mirror hardware head/tail into the software bookkeeping */
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	return ret;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
	ring->gem_object = NULL;
err_hws:
	cleanup_status_page(dev, ring);
	return ret;
}
/*
 * Tear down everything intel_init_ring_buffer() set up.  Safe to call
 * on a ring that was never initialized (gem_object == NULL).
 */
void intel_cleanup_ring_buffer(struct drm_device *dev,
			       struct intel_ring_buffer *ring)
{
	if (ring->gem_object == NULL)
		return;

	drm_core_ioremapfree(&ring->map, dev);

	i915_gem_object_unpin(ring->gem_object);
	drm_gem_object_unreference(ring->gem_object);
	ring->gem_object = NULL;
	cleanup_status_page(dev, ring);
}
/*
 * Fill the space between the current tail and the end of the ring with
 * MI_NOOPs so emission can restart at offset 0.
 */
static int intel_wrap_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;
	rem = ring->size - ring->tail;

	/* make sure the whole pad region is free before writing it */
	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(dev, ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	/* two dwords per iteration: rem is converted to qword count */
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	/* NOTE(review): with tail wrapped to 0, space = head - 8 mirrors
	 * the (tail + 8) slack used in the other space computations */
	ring->space = ring->head - 8;

	return 0;
}
/*
 * Busy-wait (with yield) for at least n bytes of free space in the ring.
 *
 * Returns 0 once space is available, -EBUSY after a 3 second timeout.
 */
int intel_wait_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring, int n)
{
	unsigned long end;
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_ring_wait_begin (dev);
	end = jiffies + 3 * HZ;
	do {
		/* re-sample the hardware head every iteration */
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end (dev);
			return 0;
		}

		/* let legacy userspace see that we were throttled */
		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		yield();
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end (dev);
	return -EBUSY;
}
/*
 * Reserve num_dwords of space in the ring before emitting, wrapping
 * and/or waiting as necessary.
 *
 * NOTE(review): the error returns of intel_wrap_ring_buffer() and
 * intel_wait_ring_buffer() are ignored here — after a wait timeout the
 * caller will emit into a full ring.
 */
void intel_ring_begin(struct drm_device *dev,
		      struct intel_ring_buffer *ring,
		      int num_dwords)
{
	int n = 4*num_dwords;	/* dwords -> bytes */
	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);

	ring->space -= n;
}
/* Wrap the software tail and push it out to the hardware tail register. */
void intel_ring_advance(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->set_tail(dev, ring, ring->tail);
}

748
void intel_fill_struct(struct drm_device *dev,
749 750 751
		       struct intel_ring_buffer *ring,
		       void *data,
		       unsigned int len)
752 753 754
{
	unsigned int *virt = ring->virtual_start + ring->tail;
	BUG_ON((len&~(4-1)) != 0);
755
	intel_ring_begin(dev, ring, len/4);
756 757 758 759 760 761
	memcpy(virt, data, len);
	ring->tail += len;
	ring->tail &= ring->size - 1;
	ring->space -= len;
	intel_ring_advance(dev, ring);
}
762
/* The render engine, present on all generations. */
static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.setup_status_page	= render_setup_status_page,
	.init			= init_render_ring,
	.set_tail		= ring_set_tail,
	.get_active_head	= render_ring_get_active_head,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= render_ring_get_seqno,
	.user_irq_get		= render_ring_get_user_irq,
	.user_irq_put		= render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
};
/* ring buffer for bit-stream decoder (pre-Gen6 BSD engine) */
static const struct intel_ring_buffer bsd_ring = {
	.name                   = "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.setup_status_page	= bsd_setup_status_page,
	.init			= init_bsd_ring,
	.set_tail		= ring_set_tail,
	.get_active_head	= bsd_ring_get_active_head,
	.flush			= bsd_ring_flush,
	.add_request		= bsd_ring_add_request,
	.get_seqno		= bsd_ring_get_seqno,
	.user_irq_get		= bsd_ring_get_user_irq,
	.user_irq_put		= bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
};

799 800

static void gen6_bsd_setup_status_page(struct drm_device *dev,
801
				       struct  intel_ring_buffer *ring)
802 803 804 805 806 807
{
       drm_i915_private_t *dev_priv = dev->dev_private;
       I915_WRITE(GEN6_BSD_HWS_PGA, ring->status_page.gfx_addr);
       I915_READ(GEN6_BSD_HWS_PGA);
}

/*
 * Gen6 BSD tail writes must be bracketed by disabling and re-enabling
 * the engine's sleep messaging and waiting for the idle indicator, or
 * the tail update can be lost.  Order of the register writes matters.
 */
static void gen6_bsd_ring_set_tail(struct drm_device *dev,
				   struct intel_ring_buffer *ring,
				   u32 value)
{
       drm_i915_private_t *dev_priv = dev->dev_private;

       /* Every tail move must follow the sequence below */
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
       I915_WRITE(GEN6_BSD_RNCID, 0x0);

       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                       50))
               DRM_ERROR("timed out waiting for IDLE Indicator\n");

       I915_WRITE_TAIL(ring, value);
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
/* The Gen6 BSD engine has its own active-head register offset. */
static unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev,
						  struct intel_ring_buffer *ring)
{
       drm_i915_private_t *dev_priv = dev->dev_private;
       return I915_READ(GEN6_BSD_RING_ACTHD);
}
/* Gen6 BSD flushes via the 4-dword MI_FLUSH_DW command, not MI_FLUSH. */
static void gen6_bsd_ring_flush(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				u32 invalidate_domains,
				u32 flush_domains)
{
       intel_ring_begin(dev, ring, 4);
       intel_ring_emit(dev, ring, MI_FLUSH_DW);
       intel_ring_emit(dev, ring, 0);
       intel_ring_emit(dev, ring, 0);
       intel_ring_emit(dev, ring, 0);
       intel_ring_advance(dev, ring);
}
/*
 * Dispatch a batch buffer on the Gen6 BSD ring; cliprects do not apply
 * to this engine.
 */
static int
gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				      struct intel_ring_buffer *ring,
				      struct drm_i915_gem_execbuffer2 *exec,
				      struct drm_clip_rect *cliprects,
				      uint64_t exec_offset)
{
       uint32_t exec_start;

       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

       intel_ring_begin(dev, ring, 2);
       intel_ring_emit(dev, ring,
		       MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
       /* bit0-7 is the length on GEN6+ */
       intel_ring_emit(dev, ring, exec_start);
       intel_ring_advance(dev, ring);

       return 0;
}
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
       .name			= "gen6 bsd ring",
       .id			= RING_BSD,
       .mmio_base		= GEN6_BSD_RING_BASE,
       .size			= 32 * PAGE_SIZE,
       .setup_status_page	= gen6_bsd_setup_status_page,
       .init			= init_bsd_ring,
       .set_tail		= gen6_bsd_ring_set_tail,
       .get_active_head		= gen6_bsd_ring_get_active_head,
       .flush			= gen6_bsd_ring_flush,
       .add_request		= bsd_ring_add_request,
       .get_seqno		= bsd_ring_get_seqno,
       .user_irq_get		= bsd_ring_get_user_irq,
       .user_irq_put		= bsd_ring_put_user_irq,
       .dispatch_gem_execbuffer	= gen6_bsd_ring_dispatch_gem_execbuffer,
};
/*
 * Instantiate and initialize the render ring.  Chipsets without a
 * GTT-based hardware status page use the DMA-allocated one instead.
 */
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->render_ring = render_ring;

	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->render_ring.status_page.page_addr
			= dev_priv->status_page_dmah->vaddr;
		memset(dev_priv->render_ring.status_page.page_addr,
				0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

910 911 912 913
	if (IS_GEN6(dev))
		dev_priv->bsd_ring = gen6_bsd_ring;
	else
		dev_priv->bsd_ring = bsd_ring;
914 915 916

	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
}