/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_print.h>

#include "display/intel_atomic.h"
#include "display/intel_csr.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"

#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

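/*
 * Stash one formatted chunk of the error string in the scatterlist.  Note
 * that sg->dma_address is reused here to record the chunk's byte offset
 * within the final string, which i915_gpu_coredump_copy_to_buffer() uses
 * to seek to an arbitrary read position.
 */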
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

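/*
 * Grow the output buffer: the filled chunk is added to the scatterlist
 * (chaining in a fresh sg page if the current table is full) and a new
 * buffer of at least len + 1 bytes is allocated for subsequent output.
 */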
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct pagevec *pv)
{
	pagevec_release(pv);
}

static int pool_refill(struct pagevec *pv, gfp_t gfp)
{
	while (pagevec_space(pv)) {
		struct page *p;

		p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		pagevec_add(pv, p);
	}

	return 0;
}

static int pool_init(struct pagevec *pv, gfp_t gfp)
{
	int err;

	pagevec_init(pv);

	err = pool_refill(pv, gfp);
	if (err)
		pool_fini(pv);

	return err;
}

static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
{
	struct page *p;

	p = alloc_page(gfp);
	if (!p && pagevec_count(pv))
		p = pv->pages[--pv->nr];

	return p ? page_address(p) : NULL;
}

static void pool_free(struct pagevec *pv, void *addr)
{
	struct page *p = virt_to_page(addr);

	if (pagevec_space(pv))
		pagevec_add(pv, p);
	else
		__free_page(p);
}

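/*
 * With CONFIG_DRM_I915_COMPRESS_ERROR enabled, captured vma contents are
 * deflated with zlib as they are copied into the error state; otherwise
 * the pages are stored uncompressed.
 */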
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct pagevec pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page;

	if (dst->page_count >= dst->num_pages)
		return ERR_PTR(-ENOSPC);

	page = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page)
		return ERR_PTR(-ENOMEM);

	return dst->pages[dst->page_count++] = page;
}

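/*
 * Compress one page into the coredump.  When the source is write-combined
 * (read back through an uncached mapping), it is first staged into c->tmp
 * using the accelerated WC memcpy, as having zlib read directly from WC
 * memory would be very slow.
 */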
static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct pagevec pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	dst->pages[dst->page_count++] = ptr;
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
	int slice;
	int subslice;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || INTEL_GEN(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (INTEL_GEN(m->i915) <= 6)
		return;

	for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (INTEL_GEN(m->i915) < 12)
		return;

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;

	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime * period,
		   mul_u32_u32(ctx->avg_runtime, period));
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

static struct i915_vma_coredump *
find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR:   0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  engine reset count: %u\n", ee->reset_count);

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}

	error_print_context(m, "  Active context: ", &ee->context);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void print_error_vma(struct drm_i915_error_state_buf *m,
			    const struct intel_engine_cs *engine,
			    const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	int page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	for (page = 0; page < vma->page_count; page++) {
		int i, len;

		len = PAGE_SIZE;
		if (page == vma->page_count - 1)
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(vma->pages[page][i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print_static(&error->device_info, &p);
	intel_device_info_print_runtime(&error->runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	print_error_vma(m, NULL, error_uc->guc_log);
}

static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(&gt->info.sseu, &p);
}

static void err_print_gt(struct drm_i915_error_state_buf *m,
			 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;
	int i;

	err_printf(m, "GT awake: %s\n", yesno(gt->awake));
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);

	if (IS_GEN_RANGE(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (INTEL_GEN(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (IS_GEN(m->i915, 7))
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GEN_RANGE(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (IS_GEN(m->i915, 12))
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (INTEL_GEN(m->i915) >= 12) {
		int i;

		for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		error_print_engine(m, ee);
		for (vma = ee->vma; vma; vma = vma->next)
			print_error_vma(m, ee->engine, vma);
	}

	if (gt->uc)
		err_print_uc(m, gt->uc);

	err_print_gt_info(m, gt);
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(m->i915)) {
		struct intel_csr *csr = &m->i915->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
	err_printf(m, "PM suspended: %s\n", yesno(error->suspended));

	if (error->gt)
		err_print_gt(m, error->gt);

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, error->display);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

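/*
 * Copy a window of the formatted error string into the caller's buffer.
 * The scatterlist chunk where the previous read finished is cached in
 * error->fit so that sequential reads do not rescan the list from the start.
 */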
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		int page;

		for (page = 0; page < vma->page_count; page++)
			free_page((unsigned long)vma->pages[page]);

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.path);
	kfree(uc->huc_fw.path);
	i915_vma_coredump_free(uc->guc_log);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);
	kfree(error->display);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

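/*
 * Snapshot the contents of a vma into the error state.  The backing pages
 * are read back either through the reserved error-capture slot in the GGTT,
 * via the local-memory iomap, or by kmap()ing system pages directly (with
 * clflush around the copy), and are then fed to the compressor.
 */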
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma *vma,
			 const char *name,
			 struct i915_vma_compress *compress)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	unsigned long num_pages;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma || !vma->pages || !compress)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->gtt_page_sizes = vma->page_sizes.gtt;
	dst->num_pages = num_pages;
	dst->page_count = 0;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma->pages) {
			mutex_lock(&ggtt->error_mutex);
			ggtt->vm.insert_page(&ggtt->vm, dma, slot,
					     I915_CACHE_NONE, 0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void  __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (i915_gem_object_is_lmem(vma->obj)) {
		struct intel_memory_region *mem = vma->obj->mm.region;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma->pages) {
			void __iomem *s;

			s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma->pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap(page);
			ret = compress_page(compress, s, dst, false);
			kunmap(page);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		while (dst->page_count--)
			pool_free(&compress->pool, dst->pages[dst->page_count]);
		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (INTEL_GEN(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (INTEL_GEN(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		if (INTEL_GEN(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (INTEL_GEN(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (INTEL_GEN(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (INTEL_GEN(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (INTEL_GEN(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (IS_GEN(i915, 7)) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				/* fall through */
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN(engine->i915, 6)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (IS_GEN(i915, 6)) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (IS_GEN(i915, 7)) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (INTEL_GEN(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

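/*
 * Record the GEM context attached to a request.  Returns true if the hang
 * should be treated as simulated, i.e. the context asked not to have its
 * error state captured (or no GEM context could be found).
 */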
static bool record_context(struct i915_gem_context_coredump *e,
			   const struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);

	e->total_runtime = rq->context->runtime.total;
	e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma *vma;
	char name[16];
};

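/*
 * Add a vma to the list of buffers to capture, taking a reference on its
 * active tracker so the backing pages cannot be released before they are
 * copied; the reference is dropped in intel_engine_coredump_add_vma().
 */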
static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	struct intel_engine_capture_vma *c;

	if (!vma)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_active_acquire_if_busy(&vma->active)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma = vma; /* reference held while active */

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma(capture, c->vma, "user", gfp);

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	engine_record_registers(ee);
	engine_record_execlists(ee);

	return ee;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, rq);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma(vma, rq->batch, "batch", gfp);
	vma = capture_user(vma, rq, gfp);
	vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
	vma = capture_vma(vma, rq->context->state, "HW context", gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma *vma = this->vma;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt,
						 vma, this->name,
						 compress));

		i915_active_release(&vma->active);

		capture = this->next;
		kfree(this);
	}

	add_vma(ee,
		i915_vma_coredump_create(engine->gt,
					 engine->status_page.vma,
					 "HW Status",
					 compress));

	add_vma(ee,
		i915_vma_coredump_create(engine->gt,
					 engine->wa_ctx.vma,
					 "WA context",
					 compress));
}

static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct i915_request *rq;
	unsigned long flags;

	ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
	if (!ee)
		return NULL;

	spin_lock_irqsave(&engine->active.lock, flags);
	rq = intel_engine_find_active_request(engine);
	if (rq)
		capture = intel_engine_coredump_add_request(ee, rq,
							    ATOMIC_MAYFAIL);
	spin_unlock_irqrestore(&engine->active.lock, flags);
	if (!capture) {
		kfree(ee);
		return NULL;
	}

	intel_engine_coredump_add_vma(ee, capture, compress);

	return ee;
}

static void
gt_record_engines(struct intel_gt_coredump *gt,
		  struct i915_vma_compress *compress)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress);
		if (!ee)
			continue;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}

static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
	     struct i915_vma_compress *compress)
{
	const struct intel_uc *uc = &gt->_gt->uc;
	struct intel_uc_coredump *error_uc;

	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
	if (!error_uc)
		return NULL;

	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));

	/* Non-default firmware paths will be specified by the modparam.
	 * As modparams are generally accessible from userspace, make
	 * explicit copies of the firmware paths.
	 */
	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
	error_uc->guc_log =
		i915_vma_coredump_create(gt->_gt,
					 uc->guc.log.vma, "GuC log buffer",
					 compress);

	return error_uc;
}

/* Capture all registers which don't fit into another category. */
static void gt_record_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	/*
	 * General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ier = intel_uncore_read(uncore, VLV_IER);
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
	}

	if (IS_GEN(i915, 7))
		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);

	if (INTEL_GEN(i915) >= 12) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA1);
	} else if (INTEL_GEN(i915) >= 8) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN(i915, 6)) {
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_GEN(i915) >= 7)
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);

	if (INTEL_GEN(i915) >= 6) {
		gt->derrmr = intel_uncore_read(uncore, DERRMR);
		if (INTEL_GEN(i915) < 12) {
			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
		}
	}

	/* 3: Feature specific registers */
	if (IS_GEN_RANGE(i915, 6, 7)) {
		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
	}

	if (IS_GEN_RANGE(i915, 8, 11))
		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);

	if (IS_GEN(i915, 12))
		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);

	if (INTEL_GEN(i915) >= 12) {
		for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
			gt->sfc_done[i] =
				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
		}

		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
	}

	/* 4: Everything else */
	if (INTEL_GEN(i915) >= 11) {
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
		gt->gtier[0] =
			intel_uncore_read(uncore,
					  GEN11_RENDER_COPY_INTR_ENABLE);
		gt->gtier[1] =
			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
		gt->gtier[2] =
			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
		gt->gtier[3] =
			intel_uncore_read(uncore,
					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		gt->gtier[4] =
			intel_uncore_read(uncore,
					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
		gt->gtier[5] =
			intel_uncore_read(uncore,
					  GEN11_GUNIT_CSME_INTR_ENABLE);
		gt->ngtier = 6;
	} else if (INTEL_GEN(i915) >= 8) {
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			gt->gtier[i] =
				intel_uncore_read(uncore, GEN8_GT_IER(i));
		gt->ngtier = 4;
	} else if (HAS_PCH_SPLIT(i915)) {
		gt->ier = intel_uncore_read(uncore, DEIER);
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	} else if (IS_GEN(i915, 2)) {
		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
	} else if (!IS_VALLEYVIEW(i915)) {
		gt->ier = intel_uncore_read(uncore, GEN2_IER);
	}
	gt->eir = intel_uncore_read(uncore, EIR);
	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}

static void gt_record_info(struct intel_gt_coredump *gt)
{
	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
}

/*
 * Generate a semi-unique error code. The code is not meant to have meaning; the
 * code's only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very common
	 * synchronization commands which almost always appear in the case of
	 * strictly a client bug. Use instdone to differentiate those somewhat.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}

static const char *error_msg(struct i915_gpu_coredump *error)
{
	struct intel_engine_coredump *first = NULL;
	struct intel_gt_coredump *gt;
	intel_engine_mask_t engines;
	int len;

	engines = 0;
	for (gt = error->gt; gt; gt = gt->next) {
		struct intel_engine_coredump *cs;

		if (gt->engine && !first)
			first = gt->engine;

		for (cs = gt->engine; cs; cs = cs->next)
			engines |= cs->engine->mask;
	}

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%x:%08x",
			INTEL_GEN(error->i915), engines,
			generate_ecode(first));
	if (first && first->context.pid) {
		/* Just show the first executing process, more is confusing */
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 first->context.comm, first->context.pid);
	}

	return error->error_msg;
}

static void capture_gen(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = i915->runtime_pm.suspended;

	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	i915_params_copy(&error->params, &i915->params);
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	error->driver_caps = i915->caps;
}

struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
	struct i915_gpu_coredump *error;

	if (!i915->params.error_capture)
		return NULL;

	error = kzalloc(sizeof(*error), gfp);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	error->time = ktime_get_real();
	error->boottime = ktime_get_boottime();
	error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
	error->capture = jiffies;

	capture_gen(error);

	return error;
}

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))

struct intel_gt_coredump *
intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
{
	struct intel_gt_coredump *gc;

	gc = kzalloc(sizeof(*gc), gfp);
	if (!gc)
		return NULL;

	gc->_gt = gt;
	gc->awake = intel_gt_pm_is_awake(gt);

	gt_record_regs(gc);
	gt_record_fences(gc);

	return gc;
}

struct i915_vma_compress *
i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
	struct i915_vma_compress *compress;

	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
	if (!compress)
		return NULL;

	if (!compress_init(compress)) {
		kfree(compress);
		return NULL;
	}

	return compress;
}

void i915_vma_capture_finish(struct intel_gt_coredump *gt,
			     struct i915_vma_compress *compress)
{
	if (!compress)
		return;

	compress_fini(compress);
	kfree(compress);
}

struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
	if (!error)
		return ERR_PTR(-ENOMEM);

	error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL);
	if (error->gt) {
		struct i915_vma_compress *compress;

		compress = i915_vma_capture_prepare(error->gt);
		if (!compress) {
			kfree(error->gt);
			kfree(error);
			return ERR_PTR(-ENOMEM);
		}

		gt_record_info(error->gt);
		gt_record_engines(error->gt, compress);

		if (INTEL_INFO(i915)->has_gt_uc)
			error->gt->uc = gt_record_uc(error->gt, compress);

		i915_vma_capture_finish(error->gt, compress);

		error->simulated |= error->gt->simulated;
	}

	error->overlay = intel_overlay_capture_error_state(i915);
	error->display = intel_display_capture_error_state(i915);

	return error;
}

void i915_error_state_store(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915;
	static bool warned;

	if (IS_ERR_OR_NULL(error))
		return;

	i915 = error->i915;
	drm_info(&i915->drm, "%s\n", error_msg(error));

	if (error->simulated ||
	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
		return;

	i915_gpu_coredump_get(error);

	if (!xchg(&warned, true) &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			i915->drm.primary->index);
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @i915: i915 device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	error = i915_gpu_coredump(i915);
	if (IS_ERR(error)) {
		cmpxchg(&i915->gpu_error.first_error, NULL, error);
		return;
	}

	i915_error_state_store(error);
	i915_gpu_coredump_put(error);
}

struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_put(error);
}

void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
1911
}