#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
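/*
 * A sketch of how this rule is typically honoured (mirroring the
 * __intel_ring_space() helper declared later in this file; assumed
 * implementation, shown for illustration only):
 *
 *	int space = head - tail;
 *	if (space <= 0)
 *		space += size;
 *	return space - I915_RING_FREE_SPACE;
 *
 * Keeping I915_RING_FREE_SPACE bytes in reserve ensures the tail can
 * never catch up to the head within the same cacheline.
 */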

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
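/*
 * Worked example (assuming I915_NUM_ENGINES == 5 and the 8-byte slots
 * above): the slot RCS uses to signal VCS is
 * GEN8_SEMAPHORE_OFFSET(0, 1) = (0 * 5 + 1) * 8 = 0x08, which matches
 * the gen8 signal table documented further below.
 */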

enum intel_engine_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE_SEQNO,
	HANGCHECK_ACTIVE_HEAD,
	HANGCHECK_ACTIVE_SUBUNITS,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
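/*
 * Illustrative use of the iterator above (a sketch only; see
 * intel_engine_get_instdone() declared near the end of this file):
 *
 *	struct intel_instdone instdone;
 *	int slice, subslice;
 *
 *	intel_engine_get_instdone(engine, &instdone);
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		DRM_DEBUG("sampler[%d][%d] = 0x%08x\n", slice, subslice,
 *			  instdone.sampler[slice][subslice]);
 */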

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_engine_hangcheck_action action;
	int deadlock;
	struct intel_instdone instdone;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char	*name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
	u32		mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
		bool irq_posted;

		spinlock_t lock; /* protects the lists of requests; irqsafe */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned long timeout;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*init_hw)(struct intel_engine_cs *engine);
	void		(*reset_hw)(struct intel_engine_cs *engine,
				    struct drm_i915_gem_request *req);

	int		(*init_context)(struct drm_i915_gem_request *req);

	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
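	/*
	 * For example, a caller wanting both a flush and an invalidate
	 * around a batch would request (a sketch):
	 *
	 *	ret = engine->emit_flush(req, EMIT_BARRIER);
	 *	if (ret)
	 *		return ret;
	 */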
	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
					   u32 *out);
	int		emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void		(*schedule)(struct drm_i915_gem_request *request,
				    int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void		(*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to   signal to      signal to
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from    sync from	sync from
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
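	/*
	 * Cross-check of the transpose relationship (seqno_size == 8,
	 * NUM_RINGS == 5): f(VCS, RCS) = 1 * 5 * 8 + 8 * 0 = 0x28, and
	 * g(RCS, VCS) = 1 * 5 * 8 + 8 * 0 = 0x28 -- the "VCS signals RCS"
	 * slot in the first table is the "RCS syncs from VCS" slot in the
	 * second, as expected.
	 */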
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *out);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
	} execlist_port[2];
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;
	bool disable_lite_restore_wa;
	bool preempt_wa;
	u32 ctx_desc_template;

	struct i915_gem_context *last_context;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Returns 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_ring *ring)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
}
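/*
 * Typical emission pattern tying these helpers together (a sketch,
 * assuming a 2-dword packet):
 *
 *	struct intel_ring *ring = req->ring;
 *	int ret;
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */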

static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - ring->vaddr;
	return offset & (ring->size - 1);
}

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->last_submitted_seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 = 84 dwords, i.e. 336 bytes,
 * for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
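/*
 * Sketch of a waiter built on this API (illustrative only; the real
 * wait loop lives in i915_wait_request() and also handles signals,
 * timeouts and the bottom-half handover):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */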
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */