/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/random.h>
#include <linux/seqlock.h>

#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"

struct drm_printer;

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to the origin of some of the magic values used
 * in the various workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

/*
 * The register defines to be used with the following macros need to accept a
 * base param, e.g:
 *
 * REG_FOO(base) _MMIO((base) + <relative offset>)
 * ENGINE_READ(engine, REG_FOO);
 *
 * register arrays are to be defined and accessed as follows:
 *
 * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
 * ENGINE_READ_IDX(engine, REG_BAR, i)
 */

#define __ENGINE_REG_OP(op__, engine__, ...) \
	intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)

#define __ENGINE_READ_OP(op__, engine__, reg__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))

#define ENGINE_READ16(...)	__ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...)	__ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...)	__ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)

#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
	__ENGINE_REG_OP(read64_2x32, (engine__), \
			lower_reg__((engine__)->mmio_base), \
			upper_reg__((engine__)->mmio_base))

#define ENGINE_READ_IDX(engine__, reg__, idx__) \
	__ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))

#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))

#define ENGINE_WRITE16(...)	__ENGINE_WRITE_OP(write16, __VA_ARGS__)
#define ENGINE_WRITE(...)	__ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...)	__ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
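
/*
 * Illustrative usage (a sketch, not part of this header): with a register
 * defined per the convention above, e.g. a hypothetical
 *
 *	#define REG_FOO(base) _MMIO((base) + 0x64)
 *
 * the accessors compose as
 *
 *	u32 val = ENGINE_READ(engine, REG_FOO);
 *	ENGINE_WRITE(engine, REG_FOO, val | BIT(0));
 */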

#define GEN6_RING_FAULT_REG_READ(engine__) \
	intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
	intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
({ \
	u32 __val; \
\
	__val = intel_uncore_read((engine__)->uncore, \
				  RING_FAULT_REG(engine__)); \
	__val &= ~(clear__); \
	__val |= (set__); \
	intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
			   __val); \
})

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_set_active_once(struct intel_engine_execlists *execlists,
			  unsigned int bit)
{
	return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_all_active(struct intel_engine_execlists *execlists)
{
	execlists->active = 0;
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void execlists_user_begin(struct intel_engine_execlists *execlists,
			  const struct execlist_port *port);
void execlists_user_end(struct intel_engine_execlists *execlists);

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}
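
/*
 * A sketch of how the port array is walked (illustrative only; any locking
 * against the submission tasklet is elided):
 *
 *	for (n = 0; n < execlists_num_ports(execlists); n++) {
 *		const struct execlist_port *port = &execlists->port[n];
 *		...
 *	}
 */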

static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));

	return port;
}

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a
	 * bit of extra paranoia to try and ensure that the HWS takes the
	 * value we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.addr[reg]);
		engine->status_page.addr[reg] = value;
		clflush(&engine->status_page.addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_PREEMPT		0x32
#define I915_GEM_HWS_PREEMPT_ADDR	(I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_SEQNO		0x40
#define I915_GEM_HWS_SEQNO_ADDR		(I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH		0x80
#define I915_GEM_HWS_SCRATCH_ADDR	(I915_GEM_HWS_SCRATCH * sizeof(u32))

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f
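
/*
 * Example (a sketch): dwords in the driver-usage area are sampled via the
 * status page helpers, e.g.
 *
 *	u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_SEQNO);
 */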

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
			 struct i915_timeline *timeline,
			 int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct kref *ref);

static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
	kref_get(&ring->ref);
	return ring;
}

static inline void intel_ring_put(struct intel_ring *ring)
{
	kref_put(&ring->ref, intel_ring_free);
}

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
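
/*
 * Canonical emission pattern (a sketch): the dword count passed to
 * intel_ring_begin() must match the number of dwords written before
 * the closing intel_ring_advance():
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(rq, 4);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *
 *	intel_ring_advance(rq, cs);
 */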

static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
			unsigned int pos)
{
	if (pos & -ring->size) /* must be strictly within the ring */
		return false;

	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
		return false;

	return true;
}

static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - rq->ring->vaddr;
	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

static inline unsigned int
__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}
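
/*
 * Worked example: for a 4096 byte ring with head == tail (an empty ring),
 * (0 - 0 - 64) & 4095 = 4032 bytes are reported usable; one cacheline is
 * deliberately held in reserve so that RING_TAIL is never programmed into
 * the same cacheline as RING_HEAD (see assert_ring_tail_valid() above).
 */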

int intel_engines_init_mmio(struct drm_i915_private *i915);
int intel_engines_setup(struct drm_i915_private *i915);
int intel_engines_init(struct drm_i915_private *i915);
void intel_engines_cleanup(struct drm_i915_private *i915);

int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_ring_submission_setup(struct intel_engine_cs *engine);
int intel_ring_submission_init(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

void intel_engine_init_execlists(struct intel_engine_cs *engine);

void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

static inline void
intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
{
	irq_work_queue(&engine->breadcrumbs.irq_work);
}

void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
				    struct drm_printer *p);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}
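
/*
 * Example (a sketch; the flags and offset are illustrative): prefilling a
 * dword stream with a stalling PIPE_CONTROL:
 *
 *	batch = gen8_emit_pipe_control(batch, PIPE_CONTROL_CS_STALL, 0);
 */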

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a: for post-sync ops following a GPGPU operation, we need
	 * a prior CS_STALL, which is emitted by the flush following
	 * the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}
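
/*
 * Example (a sketch; hwsp_offset is an illustrative name for a qword-aligned
 * GGTT address): emitting a breadcrumb write from a non-render engine:
 *
 *	cs = gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset, 0);
 */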

static inline void intel_engine_reset(struct intel_engine_cs *engine,
				      bool stalled)
{
	if (engine->reset.reset)
		engine->reset.reset(engine, stalled);
	engine->serial++; /* contexts lost */
}

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats gathering was turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
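
/*
 * Example (a sketch): busyness can only be sampled while stats collection
 * is enabled:
 *
 *	if (intel_enable_engine_stats(engine) == 0) {
 *		ktime_t busy = intel_engine_get_busy_time(engine);
 *		intel_disable_engine_stats(engine);
 *	}
 */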

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);

u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	if (!execlists->preempt_hang.inject_hang)
		return false;

	complete(&execlists->preempt_hang.completion);
	return true;
}

#else

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	return false;
}

#endif

void intel_engine_init_active(struct intel_engine_cs *engine,
			      unsigned int subclass);
#define ENGINE_PHYSICAL	0
#define ENGINE_MOCK	1
#define ENGINE_VIRTUAL	2

#endif /* _INTEL_RINGBUFFER_H_ */