/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#include "i915_gem_batch_pool.h"
#include "i915_gem_timeline.h"

#include "i915_reg.h"
#include "i915_pmu.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "intel_gpu_commands.h"

struct drm_printer;

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give an indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
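/* i.e. CACHELINE_DWORDS == 16 with the 64-byte CACHELINE_BYTES assumed above */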

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 8

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
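
/*
 * Illustrative sketch (not part of the original header): the iterator above
 * is typically used to walk the per-slice/subslice INSTDONE values captured
 * in struct intel_instdone, assuming dev_priv and a populated
 * struct intel_instdone *instdone are in scope:
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_info("sampler[%d][%d]: 0x%08x\n", slice, subslice,
 *			instdone->sampler[slice][subslice]);
 */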

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct i915_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * we use a single page to load ctx workarounds so all of these
 * values are expressed in terms of dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct i915_request;

#define I915_MAX_VCS	4
#define I915_MAX_VECS	2

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
	VCS3,
	VCS4,
#define _VCS(n) (VCS + (n))
	VECS,
	VECS2
#define _VECS(n) (VECS + (n))
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0]. This
	 * is called a Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)
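
		/*
		 * Illustrative sketch (not in the original header):
		 * request_count packs the request pointer together with
		 * EXECLIST_COUNT_BITS of submission count in its low bits,
		 * so bumping the count when (re)submitting a port to the
		 * ELSP looks roughly like
		 *
		 *	rq = port_unpack(port, &count);
		 *	port_set(port, port_pack(rq, count + 1));
		 *
		 * while port_request() recovers just the pointer when a
		 * completion event is processed.
		 */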

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue_priority: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 */
	int queue_priority;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct drm_i915_gem_object *default_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct list_head signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */

		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;

		bool irq_armed : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	struct {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to the bit number from @enable.
		 */
		unsigned int enable_count[I915_PMU_SAMPLE_BITS];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 */
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*init_hw)(struct intel_engine_cs *engine);
	void		(*reset_hw)(struct intel_engine_cs *engine,
				    struct i915_request *rq);

	void		(*park)(struct intel_engine_cs *engine);
	void		(*unpark)(struct intel_engine_cs *engine);

	void		(*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void		(*context_unpin)(struct intel_engine_cs *engine,
					 struct i915_gem_context *ctx);
	int		(*request_alloc)(struct i915_request *rq);
	int		(*init_context)(struct i915_request *rq);

	int		(*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct i915_request *rq,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void		(*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
	int		emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct i915_request *rq);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void		(*schedule)(struct i915_request *request, int priority);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void		(*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void		(*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to   signal to      signal to
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from    sync from	sync from
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
		struct {
			/* our mbox written by others */
			u32		wait[GEN6_NUM_SEMAPHORES];
			/* mboxes this ring signals to */
			i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
		} mbox;

		/* AKA wait() */
		int	(*sync_to)(struct i915_request *rq,
				   struct i915_request *signal);
		u32	*(*signal)(struct i915_request *rq, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution.  Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;
	struct i915_hw_ppgtt *legacy_active_ppgtt;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		spinlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};

static inline bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool intel_engine_supports_stats(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX	0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
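
/*
 * Illustrative emission sketch (not part of the original header): callers
 * reserve dwords with intel_ring_begin() and hand the advanced pointer back
 * via intel_ring_advance(), e.g.
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 *
 * so the GEM_BUG_ON() below can check that exactly the reserved number of
 * dwords was written.
 */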

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}

static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - rq->ring->vaddr;
	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct i915_request *rq);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
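/* i.e. (6 + 6 + 72) dwords * sizeof(u32) == 84 * 4 == 336 bytes */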

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct i915_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct i915_request *rq)
{
	return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct i915_request *rq)
{
	return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
void intel_engine_cancel_signaling(struct i915_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a: for post-sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}
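
/*
 * Illustrative sketch (not part of the original header): the gen8+
 * breadcrumb emission paths use these helpers to post the global seqno to
 * the HWSP, roughly along the lines of
 *
 *	cs = gen8_emit_ggtt_write(cs, rq->global_seqno,
 *				  intel_hws_seqno_address(rq->engine));
 *	*cs++ = MI_USER_INTERRUPT;
 *
 * with the exact sequence living in the per-engine emit_breadcrumb()
 * callbacks.
 */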

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats gathering was turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

#endif /* _INTEL_RINGBUFFER_H_ */