/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

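/*
 * The helpers below implement a simple "window" over the error dump:
 * output destined for offsets before e->start is measured and skipped,
 * formatting stops once the buffer window is full, and the accounting in
 * __i915_error_ok/__i915_error_seek/__i915_error_advance keeps e->pos and
 * e->bytes consistent so that reads at arbitrary offsets can be served by
 * simply re-running the printer.
 */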
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{

	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is first printf in this window, adjust it so that
	 * start position matches start of the buffer
	 */

	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

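/*
 * While still seeking to e->start, the length of each printf is measured
 * with vsnprintf(NULL, 0, ...) on a va_copy of the argument list (a
 * va_list must not be consumed twice); only once the window has been
 * reached is anything actually formatted into e->buf.
 */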
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
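/*
 * Hypothetical usage of the helpers above, for illustration only:
 * err_printf() for formatted output, err_puts() for cheap literal flags:
 *
 *	err_printf(m, "EIR: 0x%08x\n", error->eir);
 *	err_puts(m, tiling_flag(I915_TILING_X));	// appends " X"
 */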

static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "  %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_ring *ring)
{
	if (!ring->valid)
		return;

	err_printf(m, "  HEAD: 0x%08x\n", ring->head);
	err_printf(m, "  TAIL: 0x%08x\n", ring->tail);
	err_printf(m, "  CTL: 0x%08x\n", ring->ctl);
	err_printf(m, "  HWS: 0x%08x\n", ring->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

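/* Hexdump helper: one 32-bit word per line, "<byte offset> : <value>",
 * over every captured page of an error object.
 */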
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x :  %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}

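/*
 * Top-level pretty-printer: serialises a previously captured
 * drm_i915_error_state through the windowed buffer above. This is what
 * userspace ultimately reads as the GPU crash dump.
 */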
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		err_printf(m, "%s command stream:\n", ring_str(i));
		i915_ring_error_state(m, dev, &error->ring[i]);
	}

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo[0],
				    error->active_bo_count[0]);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo[0],
				    error->pinned_bo_count[0]);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
					offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

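/*
 * A minimal sketch of how a reader might drive the buffer API (local
 * names are illustrative only, error handling elided):
 *
 *	struct drm_i915_error_state_buf buf;
 *
 *	if (i915_error_state_buf_init(&buf, count, pos) == 0) {
 *		i915_error_state_to_str(&buf, &error_priv);
 *		// copy buf.buf[0..buf.bytes) out, then free buf.buf
 *	}
 */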
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
				GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);
	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

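/*
 * Object snapshotting. Capture runs in atomic context, hence GFP_ATOMIC
 * and the explicitly disabled interrupts around each page copy. Three
 * copy paths exist: an uncached GGTT-bound object is read through a WC
 * io-mapping of the aperture, stolen memory is read via its physical
 * offset, and everything else is clflushed and copied with kmap_atomic().
 */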
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       struct i915_address_space *vm,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (src->cache_level == I915_CACHE_NONE &&
		    reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping &&
		    i915_is_ggtt(vm)) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src, vm) \
	i915_error_object_create_sized((dev_priv), (src), (vm), \
				       (src)->base.size>>PAGE_SHIFT)

#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
				       (src)->base.size>>PAGE_SHIFT)

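/*
 * Condense a live GEM object into the flat drm_i915_error_buffer record
 * that print_error_buffers() formats later. Note the pinned encoding:
 * 1 for a kernel pin, -1 for a user pin, 0 for unpinned, matching
 * pin_flag() at the top of this file.
 */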
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma->obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (!i915_gem_obj_is_pinned(obj))
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

/* Generate a semi-unique error code. The code is not meant to have meaning; the
 * code's only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung". However, it contains some very
	 * common synchronization commands which almost always appear in
	 * hangs that are strictly a client bug. Use instdone to
	 * differentiate some of those.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}

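/*
 * Worked example with made-up register values: if the render ring is hung
 * with IPEHR=0x0b160001 and INSTDONE=0xffffffff, the reported ecode is
 * 0x0b160001 ^ 0xffffffff = 0xf4e9fffe, i.e. "GPU HANG: ecode 0:0xf4e9fffe".
 */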
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
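		/* fall through: gen3 reads fences 0-7 from the gen2 (830) registers too */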
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	int i;

	if (!i915_semaphore_is_enabled(dev_priv->dev))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_object_create(dev_priv,
						 dev_priv->semaphore_obj,
						 &dev_priv->gtt.base);

	for_each_ring(to, dev_priv, i) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (ring == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(ring, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
	}
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));

		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->cpu_ring_head = ring->buffer->head;
	ering->cpu_ring_tail = ring->buffer->tail;

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		switch (INTEL_INFO(dev)->gen) {
		case 8:
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
			break;
		case 7:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
			break;
		case 6:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
			break;
		}
	}
}


static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}

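/*
 * Per-ring capture: for every ring this snapshots the register state, the
 * active batch (plus the workaround batch on BROKEN_CS_TLB platforms),
 * the ringbuffer, HWS page, HW context and the list of outstanding
 * requests.
 */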
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 request->ctx ?
							 request->ctx->vm :
							 &dev_priv->gtt.base);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
			    ring->scratch.obj)
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							     ring->scratch.obj);

			if (request->file_priv) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->file_priv->file->pid,
						PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);

		if (ring->status_page.obj)
			error->ring[i].hws_page =
				i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			i++;
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		i915_gem_capture_vm(dev_priv, error, vm, i++);
}

1089 1090 1091
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else {
		if (IS_GEN2(dev))
			error->ier = I915_READ16(IER);
		else
			error->ier = I915_READ(IER);
	}

	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}

static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:0x%08x", ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}

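/*
 * Lifetime notes: capture publishes at most one error state via
 * gpu_error.first_error under gpu_error.lock; readers take a reference
 * with i915_error_state_get() and drop it with i915_error_state_put(), so
 * a dump stays valid while userspace is reading it even if
 * i915_destroy_error_state() runs concurrently.
 */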
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped or LLC";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

/* NB: please notice the memset: instdone is zeroed up front, so gens that
 * expose fewer than I915_NUM_INSTDONE_REG registers report 0 for the rest.
 */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
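		/* fall through: report unknown gens with the gen7/8 layout */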
	case 7:
	case 8:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}