#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qword-aligned offsets, simply pretend
 * it's 8 bytes.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

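/*
 * Worked example (illustrative; assumes I915_NUM_ENGINES == 5 as on this
 * vintage of the driver): each (signaller, waiter) pair owns one qword
 * slot in a 5x5 table, so the slot engine 1 polls while waiting on a
 * signal from engine 0 is
 * GEN8_SEMAPHORE_OFFSET(0, 1) == (0 * 5 + 1) * 8 == 0x08.
 */
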
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

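/*
 * Illustrative sketch: walking every populated slice/subslice pair of an
 * instdone snapshot. example_print_instdone() is hypothetical and assumes
 * a .c file with the usual driver includes (i915_drv.h for the sseu masks,
 * DRM_DEBUG_DRIVER() for the reporting).
 */
static inline void example_print_instdone(struct drm_i915_private *dev_priv,
					  const struct intel_instdone *instdone)
{
	int slice, subslice;

	/* Visits only slice/subslice pairs present in the device masks */
	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		DRM_DEBUG_DRIVER("slice %d subslice %d: sampler 0x%08x, row 0x%08x\n",
				 slice, subslice,
				 instdone->sampler[slice][subslice],
				 instdone->row[slice][subslice]);
}
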
struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;

	int space;
	int size;
	int effective_size;
};
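
/*
 * Illustrative sketch: the classic power-of-two ring free-space
 * computation that "space" caches (intel_ring_update_space() is the real
 * helper). example_ring_space() is hypothetical; the I915_RING_FREE_SPACE
 * slack keeps head and tail from ever landing in the same cacheline, per
 * the restriction quoted above.
 */
static inline int example_ring_space(const struct intel_ring *ring)
{
	int space = ring->head - ring->tail;

	/* tail has wrapped past head; unwrap by one ring size */
	if (space <= 0)
		space += ring->size;

	return space - I915_RING_FREE_SPACE;
}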

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

/*
 * Engine ID definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char	*name;
	enum intel_engine_id id;
	unsigned int exec_id;
	unsigned int hw_id;
	unsigned int guc_id;
	u32		mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;

		bool irq_armed : 1;
		bool irq_enabled : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*init_hw)(struct intel_engine_cs *engine);
	void		(*reset_hw)(struct intel_engine_cs *engine,
				    struct drm_i915_gem_request *req);

	void		(*set_default_submission)(struct intel_engine_cs *engine);

	int		(*context_pin)(struct intel_engine_cs *engine,
				       struct i915_gem_context *ctx);
	void		(*context_unpin)(struct intel_engine_cs *engine,
					 struct i915_gem_context *ctx);
	int		(*request_alloc)(struct drm_i915_gem_request *req);
	int		(*init_context)(struct drm_i915_gem_request *req);

	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
					   u32 *cs);
	int		emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void		(*schedule)(struct drm_i915_gem_request *request,
				    int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void		(*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to   signal to      signal to
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from    sync from	sync from
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
		GEM_DEBUG_DECL(u32 context_id);
	} execlist_port[2];
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occur on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request, ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
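
/*
 * Worked example (assuming MI_STORE_DWORD_INDEX_SHIFT == 2, i.e. a dword
 * index becomes a byte offset): I915_GEM_HWS_INDEX_ADDR == 0x30 << 2 ==
 * 0xc0, the byte offset of the driver's seqno dword within the status page.
 */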

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
}
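
/*
 * Illustrative sketch of the emit pattern described above: the dword count
 * passed to intel_ring_begin() must match the dwords written before
 * intel_ring_advance(). example_emit_noops() is hypothetical and assumes
 * the usual driver includes (MI_NOOP from i915_reg.h, IS_ERR()/PTR_ERR()
 * from linux/err.h).
 */
static inline int example_emit_noops(struct drm_i915_gem_request *req)
{
	u32 *cs;

	/* Reserve space for exactly four dwords */
	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	/* Checks that we wrote the four dwords we reserved */
	intel_ring_advance(req, cs);
	return 0;
}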

static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);
}

void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW, i.e.
 * 84 dwords == 336 bytes).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
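
/*
 * Illustrative sketch of the waiter flow these helpers support.
 * example_wait_on_seqno() is hypothetical and ignores the bottom-half
 * handoff, pending signals and timeouts that a real caller (see
 * i915_wait_request()) must handle; it assumes the usual kernel includes
 * for set_current_state()/io_schedule().
 */
static inline void example_wait_on_seqno(struct intel_engine_cs *engine,
					 u32 seqno)
{
	struct intel_wait wait;

	intel_wait_init_for_seqno(&wait, seqno);
	intel_engine_add_wait(engine, &wait);

	for (;;) {
		/* Set the task state before sampling the seqno to avoid
		 * missing a wakeup from the signaler.
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	intel_engine_remove_wait(engine, &wait);
}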

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}
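
/*
 * Illustrative use: emitting a stalling flush into a batch under
 * construction. example_emit_flush_pc() and the flag choice are
 * hypothetical; PIPE_CONTROL_CS_STALL/PIPE_CONTROL_FLUSH_ENABLE come from
 * i915_reg.h.
 */
static inline u32 *example_emit_flush_pc(u32 *batch, u32 scratch_addr)
{
	/* Advances batch by the six dwords of the PIPE_CONTROL packet */
	return gen8_emit_pipe_control(batch,
				      PIPE_CONTROL_CS_STALL |
				      PIPE_CONTROL_FLUSH_ENABLE,
				      scratch_addr);
}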

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */