#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

struct drm_printer;

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

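/*
 * Worked example (illustrative): with five engines and 8-byte slots, the
 * semaphore an engine with id 1 uses towards an engine with id 2 lives at
 * GEN8_SEMAPHORE_OFFSET(1, 2) = (1 * I915_NUM_ENGINES + 2) * 8 = 0x38 bytes
 * into the semaphore object; GEN8_SIGNAL_OFFSET()/GEN8_WAIT_OFFSET() then add
 * the semaphore vma's GGTT start (dev_priv->semaphore->node.start) to that.
 */
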
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
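
/*
 * Usage sketch (illustrative, not driver code): walking every
 * slice/subslice pair selected by the iterator above to inspect the
 * per-unit values captured in a struct intel_instdone, e.g.
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_debug("sampler[%d][%d]=%08x row[%d][%d]=%08x\n",
 *			 slice, subslice, instdone->sampler[slice][subslice],
 *			 slice, subslice, instdone->row[slice][subslice]);
 */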

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * we use a single page to load ctx workarounds so all of these
 * values are expressed in terms of dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position; also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @irq_tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct irq_tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0]. This
	 * is called a Lite Restore of the context (see the usage sketch after
	 * this struct).
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;
};
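
/*
 * Illustrative note (not driver code): a request pointer and its
 * submission count share one word in each port above; resubmitting the
 * same context (Lite Restore) only bumps the count, e.g.
 *
 *	rq = port_request(port);		(pointer, count bits masked)
 *	count = port_count(port);		(times submitted to ELSP)
 *	port_set(port, port_pack(rq, count + 1));
 *
 * and each context-complete event from the hardware counts the port
 * back down (see execlists_port_complete() below).
 */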

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];
	enum intel_engine_id id;
	unsigned int uabi_id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;

		bool irq_armed : 1;
		bool irq_enabled : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*init_hw)(struct intel_engine_cs *engine);
	void		(*reset_hw)(struct intel_engine_cs *engine,
				    struct drm_i915_gem_request *req);

	void		(*park)(struct intel_engine_cs *engine);
	void		(*unpark)(struct intel_engine_cs *engine);

	void		(*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void		(*context_unpin)(struct intel_engine_cs *engine,
					 struct i915_gem_context *ctx);
	int		(*request_alloc)(struct drm_i915_gem_request *req);
	int		(*init_context)(struct drm_i915_gem_request *req);

	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
					   u32 *cs);
	int		emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void		(*schedule)(struct drm_i915_gem_request *request,
				    int priority);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void		(*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void		(*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to   signal to      signal to
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from    sync from	sync from
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution.  Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or, when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}
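
/*
 * Usage sketch (illustrative; example_retire_head_port() is not part of
 * the driver): once the hardware has reported the last outstanding
 * completion for the head port, the ports are shuffled along and, if
 * nothing is left in flight, the engine stops being marked as active
 * for user submissions.
 */
static inline void
example_retire_head_port(struct intel_engine_execlists *execlists)
{
	struct execlist_port *port = execlists->port;

	execlists_port_complete(execlists, port);
	if (!port_isset(port))
		execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
}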

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
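
/*
 * Emission sketch (illustrative; example_emit_two_dwords() is not a
 * driver helper): reserve space with intel_ring_begin(), write exactly
 * that many dwords, then close with intel_ring_advance() so the check
 * above can verify the count. The zero dwords are MI_NOOPs standing in
 * for real commands.
 */
static inline int example_emit_two_dwords(struct drm_i915_gem_request *req)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = 0; /* MI_NOOP */
	*cs++ = 0; /* MI_NOOP */
	intel_ring_advance(req, cs);

	return 0;
}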

static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}
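
/*
 * Worked example (assumes the ring size is a power of two, which the
 * mask above relies on): for a 4096-byte ring an offset of 4096 wraps to
 * 4096 & 4095 = 0, so the literal ring->size value warned about in
 * intel_ring_offset() is never written out.
 */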

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}
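
/*
 * Illustrative helper (not part of the driver): combining the two hints
 * above to ask whether the engine has caught up with the last submitted
 * seqno. Both reads are unlocked, so the answer is itself only a hint.
 */
static inline bool example_engine_seqno_caught_up(struct intel_engine_cs *engine)
{
	return intel_engine_get_seqno(engine) == intel_engine_last_submit(engine);
}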

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
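/* i.e. (6 + 6 + 72) dwords * sizeof(u32) = 84 * 4 = 336 bytes for BDW */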

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
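
/*
 * Waiter lifecycle sketch (illustrative and simplified; a real waiter
 * sleeps between add and remove and handles signals): register an
 * intel_wait for a global seqno, peek at the breadcrumb once, then
 * unregister.
 */
static inline bool
example_check_seqno_once(struct intel_engine_cs *engine, u32 seqno)
{
	struct intel_wait wait;
	bool done;

	intel_wait_init_for_seqno(&wait, seqno);
	intel_engine_add_wait(engine, &wait);

	/* a real waiter would sleep here until the user interrupt fires */
	done = i915_seqno_passed(intel_engine_get_seqno(engine), seqno);

	intel_engine_remove_wait(engine, &wait);
	return done;
}
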
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}
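
/*
 * Usage sketch (illustrative, not driver code): workaround/indirect-ctx
 * batches are built by chaining emitters like the one above, each
 * returning the advanced write pointer, e.g.
 *
 *	batch = gen8_emit_pipe_control(batch, flags, scratch_addr);
 *	*batch++ = MI_BATCH_BUFFER_END;
 *
 * where flags, scratch_addr and the terminating MI_BATCH_BUFFER_END are
 * the caller's responsibility.
 */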

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);

#endif /* _INTEL_RINGBUFFER_H_ */