/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
33
#include "i915_drm.h"
34
#include "i915_trace.h"
35
#include "intel_drv.h"
36

37 38 39 40 41 42 43 44 45 46 47 48 49 50
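/*
 * Hand out the next breadcrumb value from the shared counter. Seqno 0 is
 * reserved (see the comment below), so a wrap from 0xffffffff goes
 * straight to 1.
 */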
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static void
render_ring_flush(struct drm_device *dev,
		  struct intel_ring_buffer *ring,
		  u32	invalidate_domains,
		  u32	flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		  invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, cmd);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
}

static void ring_write_tail(struct drm_device *dev,
			    struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	I915_WRITE_TAIL(ring, value);
}

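/*
 * ACTHD is the hardware's "active head": the graphics address of the
 * instruction the ring is currently executing, used by hangcheck and
 * error capture. Gen4+ has a per-ring copy at an offset from the ring's
 * mmio base; older parts only have the single global ACTHD register.
 */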
u32 intel_ring_get_active_head(struct drm_device *dev,
			       struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

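/*
 * Bring a ring up from scratch: stop it, program its base address, and
 * verify the hardware really reset HEAD to zero before marking the ring
 * valid. Free space is head - (tail + 8): head == tail means empty, so
 * the 8 bytes of slack keep tail from ever catching head and making a
 * full ring indistinguishable from an empty one.
 */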
static int init_ring_common(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	u32 head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	obj_priv = to_intel_bo(ring->gem_object);

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(dev, ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj_priv->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		DRM_ERROR("%s head forced to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
	}

	I915_WRITE_CTL(ring,
			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_NO_REPORT | RING_VALID);

	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return 0;
}

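/*
 * Ring-common setup plus render-specific MI_MODE tuning. MI_MODE is a
 * masked register: the upper 16 bits are a write-enable mask for the
 * corresponding low bits, which is why each flag is written as
 * FLAG << 16 | FLAG.
 */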
static int init_render_ring(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = init_ring_common(dev, ring);
	int mode;

	if (INTEL_INFO(dev)->gen > 3) {
		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}
	return ret;
}

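/*
 * Emit a PIPE_CONTROL with a qword write to the given scratch address;
 * used in render_ring_add_request() to force the PIPE_NOTIFY qword out
 * to memory, one cacheline-spaced scratch write at a time.
 */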
#define PIPE_CONTROL_FLUSH(addr)					\
do {									\
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
	OUT_RING(0);							\
	OUT_RING(0);							\
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	if (IS_GEN6(dev)) {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
		OUT_RING(PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else if (HAS_PIPE_CONTROL(dev)) {
		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

		/*
		 * Workaround qword write incoherence by flushing the
		 * PIPE_NOTIFY buffers out to memory before requesting
		 * an interrupt.
		 */
		BEGIN_LP_RING(32);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128; /* write to separate cachelines */
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(4);
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(seqno);

		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}
	return seqno;
}

static u32
render_ring_get_seqno(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void intel_ring_setup_status_page(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (IS_GEN6(dev)) {
		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
			   ring->status_page.gfx_addr);
		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
	} else {
		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
			   ring->status_page.gfx_addr);
		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
	}
}

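/*
 * The BSD ring ignores the domain arguments entirely: a plain MI_FLUSH
 * is always emitted, padded with an MI_NOOP so the pair keeps the tail
 * qword-aligned.
 */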
static void
bsd_ring_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32     invalidate_domains,
		u32     flush_domains)
{
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);
}

static int init_bsd_ring(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	return init_ring_common(dev, ring);
}

static u32
ring_add_request(struct drm_device *dev,
		 struct intel_ring_buffer *ring,
		 u32 flush_domains)
{
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

	return seqno;
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

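/*
 * Generic seqno read-back: the matching ring_add_request() stored the
 * seqno into the hardware status page with MI_STORE_DWORD_INDEX, so
 * reading it back is a single load from that page.
 */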
static u32
ring_status_page_get_seqno(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
ring_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct intel_ring_buffer *ring,
			     struct drm_i915_gem_execbuffer2 *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
{
	uint32_t exec_start;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
			(2 << 6) | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);
	return 0;
}

static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_execbuffer2 *exec,
				    struct drm_clip_rect *cliprects,
				    uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			intel_ring_begin(dev, ring, 4);
			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
			intel_ring_emit(dev, ring,
					exec_start | MI_BATCH_NON_SECURE);
			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
			intel_ring_emit(dev, ring, 0);
		} else {
			intel_ring_begin(dev, ring, 2);
			if (INTEL_INFO(dev)->gen >= 4) {
				intel_ring_emit(dev, ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
				intel_ring_emit(dev, ring, exec_start);
			} else {
				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
						| (2 << 6));
				intel_ring_emit(dev, ring, exec_start |
						MI_BATCH_NON_SECURE);
			}
		}
		intel_ring_advance(dev, ring);
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, MI_FLUSH |
				MI_NO_WRITE_FLUSH |
				MI_INVALIDATE_ISP);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
	/* XXX breadcrumb */

	return 0;
}

static void cleanup_status_page(struct drm_device *dev,
				struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;
	obj_priv = to_intel_bo(obj);

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj_priv->gtt_offset;
	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(dev, ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(dev, ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->gem_object = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE);
	if (ret)
		goto err_unref;

	obj_priv = to_intel_bo(obj);
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(dev, ring);
	if (ret)
		goto err_unmap;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return ret;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
	ring->gem_object = NULL;
err_hws:
	cleanup_status_page(dev, ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
			       struct intel_ring_buffer *ring)
{
	if (ring->gem_object == NULL)
		return;

	drm_core_ioremapfree(&ring->map, dev);

	i915_gem_object_unpin(ring->gem_object);
	drm_gem_object_unreference(ring->gem_object);
	ring->gem_object = NULL;
	cleanup_status_page(dev, ring);
}

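/*
 * The tail has reached the end of the buffer: pad the remaining bytes
 * with MI_NOOPs (two dwords per iteration, hence rem /= 8) and restart
 * at offset 0, waiting first if the pad area is still owned by the GPU.
 */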
static int intel_wrap_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;
	rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(dev, ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

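/*
 * Poll HEAD until at least n bytes of the ring are free, for up to
 * three seconds; the sarea perf box is flagged so userspace tools can
 * see that we stalled waiting on the GPU.
 */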
int intel_wait_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring, int n)
{
	unsigned long end;
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

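/*
 * Reserve space for num_dwords 32-bit commands (4 bytes each), wrapping
 * or waiting as needed. Callers pair this with intel_ring_emit() calls
 * and a final intel_ring_advance() that publishes the new tail.
 */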
void intel_ring_begin(struct drm_device *dev,
		      struct intel_ring_buffer *ring,
		      int num_dwords)
{
	int n = 4*num_dwords;
	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);

	ring->space -= n;
}

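/*
 * Publish the new tail to the hardware. Masking with size - 1 relies on
 * the ring size being a power of two (all rings here are 32 * PAGE_SIZE).
 */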
void intel_ring_advance(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(dev, ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= render_ring_get_seqno,
	.user_irq_get		= render_ring_get_user_irq,
	.user_irq_put		= render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name			= "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_bsd_ring,
	.write_tail		= ring_write_tail,
	.flush			= bsd_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_status_page_get_seqno,
	.user_irq_get		= bsd_ring_get_user_irq,
	.user_irq_put		= bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
};

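/*
 * Gen6 BSD tail writes bracket the MMIO write with PSMI sleep-state
 * messaging: force the ring out of its low-power state, wait for the
 * idle indicator, write the tail, then re-enable sleep messages.
 */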
static void gen6_bsd_ring_write_tail(struct drm_device *dev,
				     struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

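/*
 * Gen6 replaces MI_FLUSH with MI_FLUSH_DW, which carries an optional
 * post-sync address and data; they are unused here, hence the three
 * zero dwords.
 */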
static void gen6_ring_flush(struct drm_device *dev,
			    struct intel_ring_buffer *ring,
			    u32 invalidate_domains,
			    u32 flush_domains)
{
	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_FLUSH_DW);
	intel_ring_emit(dev, ring, 0);
	intel_ring_emit(dev, ring, 0);
	intel_ring_emit(dev, ring, 0);
	intel_ring_advance(dev, ring);
}

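/*
 * Unlike the pre-Gen6 paths above, no (2 << 6) length encoding is ORed
 * into MI_BATCH_BUFFER_START here; per the comment below, bits 0-7 of
 * the command carry the length field on Gen6+.
 */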
static int
gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring,
				  struct drm_i915_gem_execbuffer2 *exec,
				  struct drm_clip_rect *cliprects,
				  uint64_t exec_offset)
{
	uint32_t exec_start;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring,
			MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);

	return 0;
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_bsd_ring,
	.write_tail		= gen6_bsd_ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_status_page_get_seqno,
	.user_irq_get		= bsd_ring_get_user_irq,
	.user_irq_put		= bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
};

/* Blitter support (SandyBridge+) */

static void
blt_ring_get_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static void
blt_ring_put_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name			= "blt ring",
	.id			= RING_BLT,
	.mmio_base		= BLT_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_status_page_get_seqno,
	.user_irq_get		= blt_ring_get_user_irq,
	.user_irq_put		= blt_ring_put_user_irq,
	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
};

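/*
 * Per-ring constructors: copy the matching template into dev_priv and
 * hand it to intel_init_ring_buffer(). On parts without a GTT-backed
 * hardware status page, the render ring falls back to the DMA page
 * allocated at driver load (status_page_dmah).
 */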
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->render_ring = render_ring;

	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->render_ring.status_page.page_addr
			= dev_priv->status_page_dmah->vaddr;
		memset(dev_priv->render_ring.status_page.page_addr,
				0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (IS_GEN6(dev))
		dev_priv->bsd_ring = gen6_bsd_ring;
	else
		dev_priv->bsd_ring = bsd_ring;

	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->blt_ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
}