/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#include "i915_gem_batch_pool.h"
#include "i915_gem_timeline.h"

#include "i915_reg.h"
#include "i915_pmu.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "intel_gpu_commands.h"

struct drm_printer;
struct i915_sched_attr;

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 8

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
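
/*
 * A minimal usage sketch, not lifted verbatim from the driver: the iterator
 * only visits slice/subslice pairs present in the masks above, so a caller
 * can fill the per-subslice arrays of struct intel_instdone like this,
 * assuming a hypothetical read_subslice_reg() helper:
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		instdone->sampler[slice][subslice] =
 *			read_subslice_reg(dev_priv, slice, subslice,
 *					  GEN7_SAMPLER_INSTDONE);
 */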

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct i915_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct i915_request;

#define I915_MAX_VCS	4
#define I915_MAX_VECS	2

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
	VCS3,
	VCS4,
#define _VCS(n) (VCS + (n))
	VECS,
	VECS2
#define _VECS(n) (VECS + (n))
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)
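
/*
 * A rough sketch of how the count packing above is meant to be used by the
 * submission code (illustrative only, not taken verbatim from the driver):
 *
 *	port_set(port, port_pack(rq, 0));		// first ELSP submission
 *	...
 *	rq = port_unpack(port, &count);
 *	port_set(port, port_pack(rq, count + 1));	// resubmit (lite restore)
 *
 * port_count() then reports how many completion events are still expected
 * before the port can be considered idle.
 */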

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue_priority: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 */
	int queue_priority;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct drm_i915_gem_object *default_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct list_head signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */

		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;

		bool irq_armed : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	struct {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to the bit number from @enable.
		 */
		unsigned int enable_count[I915_PMU_SAMPLE_BITS];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 */
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*init_hw)(struct intel_engine_cs *engine);
	void		(*reset_hw)(struct intel_engine_cs *engine,
				    struct i915_request *rq);

	void		(*park)(struct intel_engine_cs *engine);
	void		(*unpark)(struct intel_engine_cs *engine);

	void		(*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void		(*context_unpin)(struct intel_engine_cs *engine,
					 struct i915_gem_context *ctx);
	int		(*request_alloc)(struct i915_request *rq);
	int		(*init_context)(struct i915_request *rq);

	int		(*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct i915_request *rq,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void		(*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
	int		emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct i915_request *rq);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void		(*schedule)(struct i915_request *request,
				    const struct i915_sched_attr *attr);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void		(*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void		(*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to   signal to      signal to
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from    sync from	sync from
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
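
	/*
	 * Worked instance of the generalisation above (assuming NUM_RINGS = 5
	 * and seqno_size = 8 bytes, as implied by the tables): for RCS (id 0)
	 * signalling to VCS (id 1),
	 *
	 *	f(RCS, VCS) = 0 * 5 * 8 + 8 * 1 = 0x08
	 *
	 * which matches the "signal to VCS" entry in the RCS row, while the
	 * transposed g(RCS, VCS) = 1 * 5 * 8 + 8 * 0 = 0x28 matches the
	 * "sync from VCS" entry.
	 */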
	struct {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
		struct {
			/* our mbox written by others */
			u32		wait[GEN6_NUM_SEMAPHORES];
			/* mboxes this ring signals to */
			i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
		} mbox;

		/* AKA wait() */
		int	(*sync_to)(struct i915_request *rq,
				   struct i915_request *signal);
		u32	*(*signal)(struct i915_request *rq, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution.  Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or, when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;
	struct i915_hw_ppgtt *legacy_active_ppgtt;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		spinlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};

static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool __execlists_need_preempt(int prio, int last)
{
	return prio > max(0, last);
}
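
/*
 * Illustrative reading of the check above (not taken verbatim from the
 * driver): with the last submitted request running at priority 0 and a
 * newly queued request at priority 1, __execlists_need_preempt(1, 0) is
 * true and a preempt-to-idle cycle may be requested; queued work at
 * priority 0 or below never preempts, since prio must strictly exceed
 * max(0, last).
 */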

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_set_active_once(struct intel_engine_execlists *execlists,
			  unsigned int bit)
{
	return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void execlists_user_begin(struct intel_engine_execlists *execlists,
			  const struct execlist_port *port);
void execlists_user_end(struct intel_engine_execlists *execlists);

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));

	return port;
}

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX	0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
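
/*
 * A minimal usage sketch of the begin/advance pairing described above
 * (illustrative only): the dword count passed to intel_ring_begin() must
 * match the number of dwords written before intel_ring_advance().
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 */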

static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - rq->ring->vaddr;
	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct i915_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct i915_request *rq)
{
	return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct i915_request *rq)
{
	return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
void intel_engine_cancel_signaling(struct i915_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a for post sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats gathering was turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

#endif /* _INTEL_RINGBUFFER_H_ */