#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
5
#include "i915_gem_batch_pool.h"
6 7 8

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct  intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
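
/*
 * Illustrative arithmetic (derived purely from the macros above): with
 * I915_NUM_ENGINES = 5 and 8-byte slots, the qword engine id 1 uses to
 * signal engine id 2 sits at GEN8_SEMAPHORE_OFFSET(1, 2) =
 * (1 * 5 + 2) * 8 = 0x38 bytes into dev_priv->semaphore_obj, and the
 * waiter reads back that same slot via GEN8_WAIT_OFFSET().
 */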

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	unsigned long user_interrupts;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
	u32 instdone[I915_NUM_INSTDONE_REG];
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void *vaddr;
	struct i915_vma *vma;

	struct intel_engine_cs *engine;
	struct list_head link;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
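
/*
 * Sketch of how last_retired_head is consumed (illustrative; the
 * canonical version is intel_ring_update_space() in intel_ringbuffer.c):
 *
 *	if (rb->last_retired_head != -1) {
 *		rb->head = rb->last_retired_head;
 *		rb->last_retired_head = -1;
 *	}
 *	rb->space = __intel_ring_space(rb->head & HEAD_ADDR,
 *				       rb->tail, rb->size);
 */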

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position; also helpful if we
 *    want to have multiple batches at different offsets based on some
 *    criteria. It is not a requirement at the moment but provides an
 *    option for future use.
 *  size: size of the batch in DWORDS
 */
struct  i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct drm_i915_gem_object *obj;
};

struct drm_i915_gem_request;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char	*name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	unsigned int hw_id;
	unsigned int guc_id; /* XXX same as hw_id? */
	u64 fence_context;
	u32		mmio_base;
	unsigned int irq_shift;
	struct intel_ringbuffer *buffer;
	struct list_head buffers;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct *irq_seqno_bh; /* bh for user interrupts */
		unsigned long irq_wakeups;
		bool irq_posted;

		spinlock_t lock; /* protects the lists of requests */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *ring);
	void		(*irq_disable)(struct intel_engine_cs *ring);

	int		(*init_hw)(struct intel_engine_cs *ring);

	int		(*init_context)(struct drm_i915_gem_request *req);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32	invalidate_domains,
				  u32	flush_domains);
	int		(*add_request)(struct drm_i915_gem_request *req);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
					       u64 offset, u32 length,
					       unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
	void		(*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to   signal to      signal to
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  i.e. transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from    sync from	sync from
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  i.e. transpose of f(x, y)
	 */
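
	/*
	 * Illustrative check of the generalization above: taking
	 * x->id = 1 and y->id = 3 with NUM_RINGS = 5 and seqno_size = 8,
	 * f(x, y) = 1 * 5 * 8 + 8 * 3 = 0x40, which matches the
	 * VCS-row/VECS-column entry in the "signal to" table.
	 */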
	struct {
		u32	sync_seqno[I915_NUM_ENGINES-1];

		union {
			struct {
				/* our mbox written by others */
				u32		wait[I915_NUM_ENGINES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[I915_NUM_ENGINES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *to_req,
				   struct intel_engine_cs *from,
				   u32 seqno);
		int	(*signal)(struct drm_i915_gem_request *signaller_req,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
	struct list_head execlist_queue;
	unsigned int fw_domains;
	unsigned int next_context_status_buffer;
	unsigned int idle_lite_restore_wa;
	bool disable_lite_restore_wa;
	u32 ctx_desc_template;
	int		(*emit_request)(struct drm_i915_gem_request *request);
	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 invalidate_domains,
				      u32 flush_domains);
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, unsigned dispatch_flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;

	bool gpu_caches_dirty;

	struct i915_gem_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
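
/*
 * Illustrative sketch of a get_cmd_length_mask() hook (assumed opcode
 * layout, MI client only; the real per-ring versions live in
 * i915_cmd_parser.c):
 *
 *	static u32 example_get_cmd_length_mask(u32 cmd_header)
 *	{
 *		u32 client = cmd_header >> 29;
 *
 *		if (client == 0x0)	/@ MI client: length in bits 5:0 @/
 *			return 0x3f;
 *
 *		return 0;		/@ unrecognized: reject the batch @/
 *	}
 *
 * (/@ @/ stands in for nested comment markers.)
 */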

static inline bool
intel_engine_initialized(const struct intel_engine_cs *engine)
{
	return engine->i915 != NULL;
}

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *engine,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = bcs, 1 = vcs, 2 = vcs2, 3 = vecs;
	 * bcs -> 0 = vcs, 1 = vcs2, 2 = vecs, 3 = rcs;
	 * vcs -> 0 = vcs2, 1 = vecs, 2 = rcs, 3 = bcs;
	 * vcs2 -> 0 = vecs, 1 = rcs, 2 = bcs, 3 = vcs;
	 * vecs -> 0 = rcs, 1 = bcs, 2 = vcs, 3 = vcs2;
	 */

	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
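
/*
 * Illustrative arithmetic: MI_STORE_DWORD_INDEX_SHIFT (from i915_reg.h)
 * is 2, turning a dword index into a byte offset within the status page,
 * so I915_GEM_HWS_INDEX_ADDR is 0x30 << 2 = 0xc0 and
 * I915_GEM_HWS_SCRATCH_ADDR is 0x40 << 2 = 0x100.
 */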

struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
				     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_engine(struct intel_engine_cs *engine);
void intel_cleanup_engine(struct intel_engine_cs *engine);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void __intel_ringbuffer_emit(struct intel_ringbuffer *rb,
					   u32 data)
{
	*(uint32_t *)(rb->vaddr + rb->tail) = data;
	rb->tail += 4;
}

static inline void __intel_ringbuffer_advance(struct intel_ringbuffer *rb)
{
	rb->tail &= rb->size - 1;
}

static inline void intel_ring_emit(struct intel_engine_cs *engine, u32 data)
{
	__intel_ringbuffer_emit(engine->buffer, data);
}

static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
				       i915_reg_t reg)
{
	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
	__intel_ringbuffer_advance(engine->buffer);
}
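
/*
 * Typical emission sequence (illustrative; real callers add their own
 * error handling):
 *
 *	int ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(req->engine, MI_NOOP);
 *	intel_ring_emit(req->engine, MI_NOOP);
 *	intel_ring_advance(req->engine);
 *
 * intel_ring_begin() guarantees the requested number of dwords is
 * available, so the emits may write without further checks.
 */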

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
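
/*
 * __intel_ring_space() is the usual power-of-two ring arithmetic with
 * I915_RING_FREE_SPACE held back (see the cacheline note at the top of
 * this file). Illustrative numbers: head = 64, tail = 192, size = 4096
 * yields (64 - 192 + 4096) - 64 = 3904 bytes available.
 */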

int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
void intel_fini_pipe_control(struct intel_engine_cs *engine);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW, i.e.
 * 84 dwords * 4 bytes/dword = 336 bytes).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
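
/*
 * Sketch of the waiter's side of the breadcrumb protocol (illustrative;
 * the canonical loop lives in i915_wait_request()):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine),
 *				      seqno))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */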
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
{
	bool wakeup = false;
	struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
	/* Note that for this not to dangerously chase a dangling pointer,
	 * the caller is responsible for ensuring that the task remains valid for
	 * wake_up_process() i.e. that the RCU grace period cannot expire.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (tsk)
		wakeup = wake_up_process(tsk);
	return wakeup;
}

void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */