#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

struct drm_printer;

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* The seqno is actually only a u32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that command requires qword-aligned offsets, simply
 * pretend it's 8 bytes.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
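
/*
 * Worked example (a sketch, assuming I915_NUM_ENGINES == 5 and the ids from
 * enum intel_engine_id below): the slot where RCS (id 0) signals VCS (id 2)
 * lives at GEN8_SEMAPHORE_OFFSET(0, 2) == (0 * 5 + 2) * 8 == 0x10 bytes into
 * the semaphore page.
 */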

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
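
/*
 * Illustrative sketch (not part of this header): dumping the per-slice,
 * per-subslice INSTDONE values collected above.
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_debug("sampler[%d][%d] = 0x%08x\n", slice, subslice,
 *			 instdone->sampler[slice][subslice]);
 */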

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};
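
/*
 * A sketch of the assumed bookkeeping: @head is the last known hardware
 * read offset, @emit is the software write offset and @tail is the offset
 * last submitted to the hardware. @space is the number of bytes that may
 * be written before catching up with @head, kept conservatively as
 * something like (head - emit - CACHELINE_BYTES) & (size - 1).
 */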

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position; also helpful if we
 *    want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state
 * of the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @irq_tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct irq_tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of context to port[0]. This
	 * is called Lite Restore, of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;
};
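
/*
 * Example (a sketch, not a contract): peeking at the request/count pair
 * packed into an execlist port by the helpers above.
 *
 *	unsigned int count;
 *	struct drm_i915_gem_request *rq =
 *		port_unpack(&execlists->port[0], &count);
 *
 * A count greater than one means the same context has been resubmitted to
 * the port (a lite restore).
 */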

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct drm_i915_gem_object *default_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;

		bool irq_armed : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*init_hw)(struct intel_engine_cs *engine);
	void		(*reset_hw)(struct intel_engine_cs *engine,
				    struct drm_i915_gem_request *req);

	void		(*park)(struct intel_engine_cs *engine);
	void		(*unpark)(struct intel_engine_cs *engine);

	void		(*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void		(*context_unpin)(struct intel_engine_cs *engine,
					 struct i915_gem_context *ctx);
	int		(*request_alloc)(struct drm_i915_gem_request *req);
	int		(*init_context)(struct drm_i915_gem_request *req);

	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
					   u32 *cs);
	int		emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void		(*schedule)(struct drm_i915_gem_request *request,
				    int priority);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void		(*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void		(*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to   signal to      signal to
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  i.e. the transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from    sync from	sync from
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  i.e. the transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution.  Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}
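
/*
 * A sketch of an assumed caller, retiring a context-complete event from
 * the interrupt tasklet (see intel_lrc.c for the real flow):
 *
 *	rq = port_unpack(port, &count);
 *	if (--count == 0) {
 *		i915_gem_request_put(rq);
 *		execlists_port_complete(execlists, port);
 *	} else {
 *		port_set(port, port_pack(rq, count));
 *	}
 */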

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so only when we are uncertain of the device state, we take
	 * a bit of extra paranoia to try to ensure that the HWS takes the
	 * value we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX	0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
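
/*
 * Typical emission idiom (a sketch): the count passed to intel_ring_begin()
 * must match the number of dwords written before intel_ring_advance().
 *
 *	u32 *cs = intel_ring_begin(req, 4);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 */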

static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords, i.e. 336 bytes,
 * for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
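
/*
 * A sketch of the assumed waiter flow (see i915_wait_request() for the
 * canonical user):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, rq);
 *	intel_engine_add_wait(engine, &wait);
 *	...sleep, woken by the breadcrumbs bottom-half, until the request
 *	   completes...
 *	intel_engine_remove_wait(engine, &wait);
 */
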
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}
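
/*
 * An assumed example caller, building a workaround batch along the lines
 * of the indirect-context batch construction in intel_lrc.c:
 *
 *	batch = gen8_emit_pipe_control(batch,
 *				       PIPE_CONTROL_FLUSH_L3 |
 *				       PIPE_CONTROL_CS_STALL |
 *				       PIPE_CONTROL_QW_WRITE,
 *				       i915_ggtt_offset(engine->scratch) +
 *				       2 * CACHELINE_BYTES);
 */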

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* W/a: post-sync ops following a GPGPU operation need a
	 * prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}
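
/*
 * For example (a sketch of an assumed caller), the execlists breadcrumb
 * writes the request's global seqno into the HWS with:
 *
 *	cs = gen8_emit_ggtt_write(cs, req->global_seqno,
 *				  intel_hws_seqno_address(req->engine));
 */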

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);

#endif /* _INTEL_RINGBUFFER_H_ */