/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static void
render_ring_flush(struct intel_ring_buffer *ring,
		  u32	invalidate_domains,
		  u32	flush_domains)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		  invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		if (intel_ring_begin(ring, 2) == 0) {
			intel_ring_emit(ring, cmd);
			intel_ring_emit(ring, MI_NOOP);
			intel_ring_advance(ring);
		}
	}
}

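/*
 * Default tail update: write the new tail straight into the ring's TAIL
 * register.  Rings that need a more involved sequence (e.g. the gen6 BSD
 * ring) override this hook with their own write_tail implementation.
 */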
static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

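/*
 * Report the ring's current ACTHD value, i.e. the graphics address the
 * command streamer is executing from.  Gen4+ parts have a per-ring ACTHD
 * register; older parts only have the single global one.
 */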
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object);
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj_priv->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		DRM_ERROR("%s head forced to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
	}

	I915_WRITE_CTL(ring,
			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj_priv->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return 0;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}

	return ret;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
	intel_ring_emit(ring__, 0);							\
	intel_ring_emit(ring__, 0);							\
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	if (IS_GEN6(dev)) {
		ret = intel_ring_begin(ring, 6);
		if (ret)
			return ret;

		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3);
		intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE |
				PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
				PIPE_CONTROL_NOTIFY);
		intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		intel_ring_emit(ring, seqno);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
	} else if (HAS_PIPE_CONTROL(dev)) {
		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

		/*
		 * Workaround qword write incoherence by flushing the
		 * PIPE_NOTIFY buffers out to memory before requesting
		 * an interrupt.
		 */
		ret = intel_ring_begin(ring, 32);
		if (ret)
			return ret;

		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
				PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
		intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		intel_ring_emit(ring, seqno);
		intel_ring_emit(ring, 0);
		PIPE_CONTROL_FLUSH(ring, scratch_addr);
		scratch_addr += 128; /* write to separate cachelines */
		PIPE_CONTROL_FLUSH(ring, scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(ring, scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(ring, scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(ring, scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(ring, scratch_addr);
		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
				PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
				PIPE_CONTROL_NOTIFY);
		intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		intel_ring_emit(ring, seqno);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
		intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		intel_ring_emit(ring, seqno);

		intel_ring_emit(ring, MI_USER_INTERRUPT);
	}

	intel_ring_advance(ring);
	*result = seqno;
	return 0;
}

static u32
render_ring_get_seqno(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

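/*
 * Program the ring's HWS_PGA register with the graphics address of its
 * hardware status page, so the GPU knows where to write status updates.
 */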
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = IS_GEN6(ring->dev) ?
		RING_HWS_PGA_GEN6(ring->mmio_base) :
		RING_HWS_PGA(ring->mmio_base);
	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static void
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32     invalidate_domains,
	       u32     flush_domains)
{
	if (intel_ring_begin(ring, 2) == 0) {
		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
	*result = seqno;
	return 0;
}

static void
bsd_ring_get_user_irq(struct intel_ring_buffer *ring)
{
	/* do nothing */
}
static void
bsd_ring_put_user_irq(struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static u32
ring_status_page_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 struct drm_i915_gem_execbuffer2 *exec,
			 struct drm_clip_rect *cliprects,
			 uint64_t exec_offset)
{
	uint32_t exec_start;
	int ret;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, exec_start);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				struct drm_i915_gem_execbuffer2 *exec,
				struct drm_clip_rect *cliprects,
				uint64_t exec_offset)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	uint32_t exec_start, exec_len;
	int i, count, ret;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cliprects, i,
					    exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			ret = intel_ring_begin(ring, 4);
			if (ret)
				return ret;

			intel_ring_emit(ring, MI_BATCH_BUFFER);
			intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE);
			intel_ring_emit(ring, exec_start + exec_len - 4);
			intel_ring_emit(ring, 0);
		} else {
			ret = intel_ring_begin(ring, 2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				intel_ring_emit(ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
				intel_ring_emit(ring, exec_start);
			} else {
				intel_ring_emit(ring, MI_BATCH_BUFFER_START
						| (2 << 6));
				intel_ring_emit(ring, exec_start |
						MI_BATCH_NON_SECURE);
			}
		}
		intel_ring_advance(ring);
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (intel_ring_begin(ring, 2) == 0) {
			intel_ring_emit(ring, MI_FLUSH |
					MI_NO_WRITE_FLUSH |
					MI_INVALIDATE_ISP);
			intel_ring_emit(ring, MI_NOOP);
			intel_ring_advance(ring);
		}
	}
	/* XXX breadcrumb */

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;
	obj_priv = to_intel_bo(obj);

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj_priv->gtt_offset;
	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->gem_object = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	obj_priv = to_intel_bo(obj);
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
	ring->gem_object = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->gem_object == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_buffer(ring, ring->size - 8);
	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->gem_object);
	drm_gem_object_unreference(ring->gem_object);
	ring->gem_object = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

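/*
 * Pad the space between the current tail and the end of the ring with
 * MI_NOOPs and wrap the tail back to the start, waiting for that trailing
 * space to drain first if it is not yet free.
 */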
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;
	rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

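/*
 * Wait for at least @n bytes of free space in the ring.  The head value
 * reported in the status page is checked first; after that the HEAD
 * register is polled for up to three seconds before giving up with
 * -EBUSY, bailing out early with -EAGAIN if the GPU is wedged.
 */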
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	head = intel_read_status_page(ring, 4);
	if (head) {
		ring->head = head & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

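/*
 * Reserve space in the ring for @num_dwords dwords of commands, wrapping
 * past the end of the buffer and/or waiting for the GPU to consume ring
 * contents as necessary.
 */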
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	int n = 4*num_dwords;
	int ret;

	if (unlikely(ring->tail + n > ring->size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

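/*
 * Commit the commands emitted since intel_ring_begin() by writing the new
 * (wrapped) tail out to the hardware.
 */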
void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= render_ring_get_seqno,
	.user_irq_get		= render_ring_get_user_irq,
	.user_irq_put		= render_ring_put_user_irq,
	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name                   = "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
	.flush			= bsd_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_status_page_get_seqno,
	.user_irq_get		= bsd_ring_get_user_irq,
	.user_irq_put		= bsd_ring_put_user_irq,
	.dispatch_execbuffer	= ring_dispatch_execbuffer,
};


static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static void gen6_ring_flush(struct intel_ring_buffer *ring,
			    u32 invalidate_domains,
			    u32 flush_domains)
{
	if (intel_ring_begin(ring, 4) == 0) {
		intel_ring_emit(ring, MI_FLUSH_DW);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_advance(ring);
	}
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      struct drm_i915_gem_execbuffer2 *exec,
			      struct drm_clip_rect *cliprects,
			      uint64_t exec_offset)
{
	uint32_t exec_start;
	int ret;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, exec_start);
	intel_ring_advance(ring);

	return 0;
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_status_page_get_seqno,
	.user_irq_get		= bsd_ring_get_user_irq,
	.user_irq_put		= bsd_ring_put_user_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
};

/* Blitter support (SandyBridge+) */

static void
blt_ring_get_user_irq(struct intel_ring_buffer *ring)
{
	/* do nothing */
}
static void
878
blt_ring_put_user_irq(struct intel_ring_buffer *ring)
{
	/* do nothing */
}


/* Workaround for some steppings of SNB:
 * each time the BLT engine's ring tail is moved, the first command
 * parsed from the ring must be MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = to_intel_bo(i915_gem_alloc_object(ring->dev, 4096));
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(&obj->base, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
		if (ret) {
			i915_gem_object_unpin(&obj->base);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static void blt_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate_domains,
			   u32 flush_domains)
{
	if (blt_ring_begin(ring, 4) == 0) {
		intel_ring_emit(ring, MI_FLUSH_DW);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_advance(ring);
	}
}

static int
blt_ring_add_request(struct intel_ring_buffer *ring,
		     u32 *result)
{
	u32 seqno;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
	*result = seqno;
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name			= "blt ring",
	.id			= RING_BLT,
	.mmio_base		= BLT_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= blt_ring_init,
	.write_tail		= ring_write_tail,
	.flush			= blt_ring_flush,
	.add_request		= blt_ring_add_request,
	.get_seqno		= ring_status_page_get_seqno,
	.user_irq_get		= blt_ring_get_user_irq,
	.user_irq_put		= blt_ring_put_user_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
	.cleanup		= blt_ring_cleanup,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->render_ring = render_ring;

	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->render_ring.status_page.page_addr
			= dev_priv->status_page_dmah->vaddr;
		memset(dev_priv->render_ring.status_page.page_addr,
				0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (IS_GEN6(dev))
		dev_priv->bsd_ring = gen6_bsd_ring;
	else
		dev_priv->bsd_ring = bsd_ring;

	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->blt_ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
}