/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/seqlock.h>

#include "i915_gem_batch_pool.h"

#include "i915_reg.h"
#include "i915_pmu.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"

struct drm_printer;
struct i915_sched_attr;

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 8

#define instdone_slice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	unsigned long action_timestamp;
	struct intel_instdone instdone;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct i915_timeline *timeline;
	struct list_head request_list;
	struct list_head active_link;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * we use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct i915_request;

#define I915_MAX_VCS	4
#define I915_MAX_VECS	2

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
	VCS3,
	VCS4,
#define _VCS(n) (VCS + (n))
	VECS,
	VECS2
#define _VECS(n) (VECS + (n))
};

struct i915_priolist {
	struct list_head requests[I915_PRIORITY_COUNT];
	struct rb_node node;
	unsigned long used;
	int priority;
};

#define priolist_for_each_request(it, plist, idx) \
	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
		list_for_each_entry(it, &(plist)->requests[idx], sched.link)

#define priolist_for_each_request_consume(it, n, plist, idx) \
	for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
		list_for_each_entry_safe(it, n, \
					 &(plist)->requests[idx - 1], \
					 sched.link)
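
/*
 * Illustrative sketch (not part of the original header): draining a
 * struct i915_priolist (here "p") with the consuming iterator, which
 * picks each non-empty priority bucket via ffs() and clears its bit in
 * @used as it goes. submit_one() is a hypothetical callback.
 *
 *	struct i915_request *rq, *rn;
 *	int i;
 *
 *	priolist_for_each_request_consume(rq, rn, p, i)
 *		submit_one(rq);
 */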

struct st_preempt_hang {
	struct completion completion;
	bool inject_hang;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of context to port[0]. This
	 * is called a Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];
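
	/*
	 * Illustrative sketch (not part of the original header): the
	 * request pointer and its submission count share a single word,
	 * packed and unpacked with the port_* helpers above, e.g.
	 *
	 *	struct i915_request *rq;
	 *	unsigned int count;
	 *
	 *	rq = port_unpack(&execlists->port[0], &count);
	 *	port_set(&execlists->port[0], port_pack(rq, count + 1));
	 *
	 * Bumping the count on port[0] is how a Lite Restore resubmission
	 * is accounted for without losing the request pointer.
	 */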

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue_priority: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 */
	int queue_priority;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u32 *csb_status;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;

	struct intel_ring *buffer;

	struct i915_timeline timeline;

	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct list_head signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */

		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;
		unsigned int irq_count;

		bool irq_armed : 1;
	} breadcrumbs;

	struct {
		/**
		 * @enable: Bitmask of enabled sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to the bit number from @enable.
		 */
		unsigned int enable_count[I915_PMU_SAMPLE_BITS];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 */
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*init_hw)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);
		void (*reset)(struct intel_engine_cs *engine, bool stalled);
		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void		(*park)(struct intel_engine_cs *engine);
	void		(*unpark)(struct intel_engine_cs *engine);

	void		(*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
					     struct i915_gem_context *ctx);

	int		(*request_alloc)(struct i915_request *rq);
	int		(*init_context)(struct i915_request *rq);

	int		(*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct i915_request *rq,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	u32		*(*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
	int		emit_breadcrumb_dw;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct i915_request *rq);

	/*
	 * Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void		(*schedule)(struct i915_request *request,
				    const struct i915_sched_attr *attr);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void		(*cancel_requests)(struct intel_engine_cs *engine);

	void		(*cleanup)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution.  Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct intel_context *last_retired_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};

static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool __execlists_need_preempt(int prio, int last)
{
	return prio > max(0, last);
}
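
/*
 * Illustrative sketch (not part of the original header): the execlists
 * backend decides whether to inject a preemption by comparing the
 * highest queued priority against the last submitted request, roughly:
 *
 *	if (intel_engine_has_preemption(engine) &&
 *	    __execlists_need_preempt(execlists->queue_priority,
 *				     rq_prio(last)))
 *		// preempt to idle, then resubmit from the queue
 *
 * rq_prio() and the preemption machinery live in the backend
 * (intel_lrc.c), not in this header.
 */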

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_set_active_once(struct intel_engine_execlists *execlists,
			  unsigned int bit)
{
	return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_all_active(struct intel_engine_execlists *execlists)
{
	execlists->active = 0;
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void execlists_user_begin(struct intel_engine_execlists *execlists,
			  const struct execlist_port *port);
void execlists_user_end(struct intel_engine_execlists *execlists);

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));

	return port;
}

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.addr[reg]);
		engine->status_page.addr[reg] = value;
		clflush(&engine->status_page.addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR		(I915_GEM_HWS_INDEX * sizeof(u32))
#define I915_GEM_HWS_PREEMPT		0x32
#define I915_GEM_HWS_PREEMPT_ADDR	(I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_SCRATCH		0x40
#define I915_GEM_HWS_SCRATCH_ADDR	(I915_GEM_HWS_SCRATCH * sizeof(u32))
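
/*
 * Illustrative sketch (not part of the original header): breadcrumb and
 * scratch writes target the GGTT address of one of the dwords above,
 * e.g. the per-engine seqno slot:
 *
 *	u32 addr = i915_ggtt_offset(engine->status_page.vma) +
 *		   I915_GEM_HWS_INDEX_ADDR;
 *
 * which can then be fed to gen8_emit_ggtt_write()/_rcs() further down.
 */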

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
			 struct i915_timeline *timeline,
			 int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
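
/*
 * Illustrative sketch (not part of the original header): the usual
 * emission pattern bracketed by intel_ring_begin()/intel_ring_advance().
 * The dword count passed to intel_ring_begin() must match the number of
 * dwords written before intel_ring_advance().
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 */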

static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
			unsigned int pos)
{
	if (pos & -ring->size) /* must be strictly within the ring */
		return false;

	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
		return false;

	return true;
}

static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - rq->ring->vaddr;
	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/*
	 * We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline.seqno);
}

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
					 u32 seqno)
{
	return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
}

static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
					      u32 seqno)
{
	GEM_BUG_ON(!seqno);
	return intel_engine_signaled(engine, seqno);
}

static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
					    u32 seqno)
{
	GEM_BUG_ON(!seqno);
	return intel_engine_signaled(engine, seqno - 1);
}

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait)
{
	wait->tsk = current;
	wait->request = NULL;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct i915_request *rq)
{
	return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct i915_request *rq)
{
	return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
void intel_engine_cancel_signaling(struct i915_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a for post sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}

static inline void intel_engine_reset(struct intel_engine_cs *engine,
				      bool stalled)
{
	if (engine->reset.reset)
		engine->reset.reset(engine, stalled);
}

void intel_engines_sanitize(struct drm_i915_private *i915, bool force);

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
void intel_engine_lost_context(struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats gathering was turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	if (!execlists->preempt_hang.inject_hang)
		return false;

	complete(&execlists->preempt_hang.completion);
	return true;
}

#else

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	return false;
}

#endif

#endif /* _INTEL_RINGBUFFER_H_ */