#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
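/*
 * Illustrative sketch only (not a driver function): one way the rule above
 * turns into a free-space calculation that always leaves I915_RING_FREE_SPACE
 * bytes between tail and head. Field names follow struct intel_ring below;
 * the helper itself is hypothetical.
 *
 *	static int example_ring_space(const struct intel_ring *ring)
 *	{
 *		int space = ring->head - ring->tail;
 *
 *		if (space <= 0)
 *			space += ring->size;
 *		return space - I915_RING_FREE_SPACE;
 *	}
 */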

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
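/*
 * Worked example (assuming I915_NUM_ENGINES == 5 and the 8-byte slots above):
 * the qword a signaller writes for a given waiter sits at
 * (from * 5 + to) * 8 bytes into the semaphore object, so engine id 0
 * signalling engine id 2 uses byte offset 0x10 on top of
 * dev_priv->semaphore->node.start.
 */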

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
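/*
 * Typical use (a simplified sketch; the real intel_engine_get_instdone() also
 * steers the slice/subslice MCR selector before each read):
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		instdone->sampler[slice][subslice] =
 *			I915_READ(GEN7_SAMPLER_INSTDONE);
 */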

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;

	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * we use a single page to load ctx workarounds so all of these
 * values are expressed in dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char	*name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
	u32		mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */

		spinlock_t lock; /* protects the lists of requests; irqsafe */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*init_hw)(struct intel_engine_cs *engine);
	void		(*reset_hw)(struct intel_engine_cs *engine,
				    struct drm_i915_gem_request *req);

	int		(*context_pin)(struct intel_engine_cs *engine,
				       struct i915_gem_context *ctx);
	void		(*context_unpin)(struct intel_engine_cs *engine,
					 struct i915_gem_context *ctx);
	int		(*request_alloc)(struct drm_i915_gem_request *req);
	int		(*init_context)(struct drm_i915_gem_request *req);

	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
					   u32 *cs);
	int		emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void		(*schedule)(struct drm_i915_gem_request *request,
				    int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void		(*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to   signal to      signal to
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from    sync from	sync from
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
		GEM_DEBUG_DECL(u32 context_id);
	} execlist_port[2];
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution.  Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or, when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
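/*
 * Example usage (a sketch): the scratch slot reserved above can be read and
 * written through the status page accessors declared earlier in this file.
 *
 *	intel_write_status_page(engine, I915_GEM_HWS_SCRATCH_INDEX, 0);
 *	...
 *	val = intel_read_status_page(engine, I915_GEM_HWS_SCRATCH_INDEX);
 *
 * The per-engine breadcrumb seqno lives at I915_GEM_HWS_INDEX; see
 * intel_engine_get_seqno() below.
 */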

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
}
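/*
 * Typical emission pattern (a sketch, not a real call site): reserve a number
 * of dwords, write exactly that many, then let intel_ring_advance() check the
 * count against the space reserved. MI_NOOP stands in for real commands.
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(req, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 */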

static inline u32
intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return offset & (req->ring->size - 1);
}

void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * wtih serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
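/* 336 == (6 + 6 + 72) dwords * sizeof(u32), i.e. the BDW worst case above */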

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait)
{
	wait->tsk = current;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
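/*
 * Rough shape of a waiter (a sketch only; the real loop in i915_wait_request()
 * also sets the task state and handles signals, timeouts and seqno resets).
 * The sleeping task is woken by the breadcrumbs bottom-half described above.
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init_for_seqno(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	while (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 *		schedule();
 *	intel_engine_remove_wait(engine, &wait);
 */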
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}
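/*
 * Example (a sketch): a workaround batch emitting a CS-stall flush, using the
 * PIPE_CONTROL flag bits from i915_reg.h; the returned pointer has advanced
 * past the six dwords written.
 *
 *	batch = gen8_emit_pipe_control(batch,
 *				       PIPE_CONTROL_CS_STALL |
 *				       PIPE_CONTROL_FLUSH_ENABLE,
 *				       0);
 */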

#endif /* _INTEL_RINGBUFFER_H_ */