/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

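/* Dump the PCH type, static device info, runtime caps and module parameters. */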
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

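/*
 * Single-character flags used in the per-object dumps below: tiling mode
 * (X/Y), 'g' for an object with live GGTT mmaps (userfault_count), 'M'
 * for an object with a kernel mapping (mm.mapping).
 */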
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x-2] = '\0';

		return buf;
	}
}

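/*
 * Print a one-line summary of an object followed by one entry per vma,
 * including GGTT views, fence register and backing page sizes.
 */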
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}

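/*
 * Accumulate object counts and sizes for one client; with stats->vm set
 * only vma bound into that address space are counted, otherwise GGTT vma.
 */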
struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total;
	u64 active, inactive;
	u64 closed;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.closed); \
} while (0)

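/*
 * Walk the global context list and print per-context memory usage,
 * attributing kernel-only state to "[k]contexts".  The list lock is
 * dropped around the body, so each context is held by a kref first.
 */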
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}

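/* Summarise shrinkable objects, memory regions and per-client usage. */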
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
			   mr->name, &mr->total, &mr->avail);
	seq_putc(m, '\n');

	print_context_stats(m, i915);

	return 0;
}

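/*
 * Interrupt state dumps: mask (IMR), identity (IIR) and enable (IER)
 * registers, only touching pipes whose power wells are already on.
 */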
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

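/* Report each HW fence register and the vma (if any) occupying it. */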
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	rcu_read_lock();
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			i915_debugfs_describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}

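/*
 * GPU error state: i915_gpu_info captures a fresh coredump on open,
 * while i915_error_state exposes (and, on write, clears) the stored
 * record of the last hang.
 */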
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

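/*
 * Dump frequency/RPS state: hand-tuned ILK fields, punit state on
 * VLV/CHV, or the full GEN6+ RPS register set and derived frequencies.
 */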
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return ret;
}

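/* Tabulate the GPU-to-CPU/ring frequency mapping reported by pcode (LLC parts only). */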
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(dev_priv) ||
					    INTEL_GEN(dev_priv) >= 10 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}

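/*
 * List every GEM context with its owning process and, for each pinned
 * engine context, the backing state object and ring.
 */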
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		seq_puts(m, "HW context ");
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					i915_debugfs_describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

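/* Report bit6 swizzling and the DRAM/arbiter registers it derives from. */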
static int i915_swizzle_info(struct seq_file *m, void *data)
{
1126
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1127
	struct intel_uncore *uncore = &dev_priv->uncore;
1128
	intel_wakeref_t wakeref;
1129

1130
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1131 1132

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1133
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
1134
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1135
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
1136

1137
	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1138
		seq_printf(m, "DDC = 0x%08x\n",
1139
			   intel_uncore_read(uncore, DCC));
1140
		seq_printf(m, "DDC2 = 0x%08x\n",
1141
			   intel_uncore_read(uncore, DCC2));
1142
		seq_printf(m, "C0DRB3 = 0x%04x\n",
1143
			   intel_uncore_read16(uncore, C0DRB3));
1144
		seq_printf(m, "C1DRB3 = 0x%04x\n",
1145
			   intel_uncore_read16(uncore, C1DRB3));
1146
	} else if (INTEL_GEN(dev_priv) >= 6) {
1147
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1148
			   intel_uncore_read(uncore, MAD_DIMM_C0));
1149
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1150
			   intel_uncore_read(uncore, MAD_DIMM_C1));
1151
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1152
			   intel_uncore_read(uncore, MAD_DIMM_C2));
1153
		seq_printf(m, "TILECTL = 0x%08x\n",
1154
			   intel_uncore_read(uncore, TILECTL));
1155
		if (INTEL_GEN(dev_priv) >= 8)
B
Ben Widawsky 已提交
1156
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1157
				   intel_uncore_read(uncore, GAMTARBMODE));
B
Ben Widawsky 已提交
1158 1159
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
1160
				   intel_uncore_read(uncore, ARB_MODE));
1161
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1162
			   intel_uncore_read(uncore, DISP_ARB_CTL));
1163
	}
1164 1165 1166 1167

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

1168
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1169 1170 1171 1172

	return 0;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

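/* Show RPS state, outstanding boosts and the autotuning window averages. */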
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   dev_priv->edram_size_mb);

	return 0;
}

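/* Report runtime-PM, IRQ and PCI power state for the device. */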
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}

static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "GT awake? %s [%d]\n",
		   yesno(dev_priv->gt.awake),
		   atomic_read(&dev_priv->gt.wakeref.count));
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, dev_priv)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_rcs_topology(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);

	return 0;
}

static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}

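/* List the context workarounds applied for each uabi engine. */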
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		const struct i915_wa_list *wal = &engine->ctx_wa_list;
		const struct i915_wa *wa;
		unsigned int count;

		count = wal->count;
		if (!count)
			continue;

		seq_printf(m, "%s: Workarounds applied: %u\n",
			   engine->name, count);

		for (wa = wal->list; count--; wa++)
			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
				   i915_mmio_reg_offset(wa->reg),
				   wa->set, wa->clr);

		seq_printf(m, "\n");
	}

	return 0;
}

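/*
 * i915_wedged: reads as 1 if the GT is terminally wedged; writing an
 * engine mask forces a reset with error capture for those engines.
 */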
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	int ret = intel_gt_terminally_wedged(&i915->gt);

	switch (ret) {
	case -EIO:
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (val > mul_u32_u32(U32_MAX, clk))
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");

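/*
 * i915_gem_drop_caches: writing a mask of DROP_* flags trims the
 * corresponding caches, optionally idling or resetting the GT first.
 */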
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU	BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

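/*
 * i915_cache_sharing: read or adjust the MBC snoop policy (0-3) on
 * gen6/7 via GEN6_MBCUNIT_SNPCR.
 */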
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	drm_dbg(&dev_priv->drm,
		"Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}

static void
intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
			  u8 *to_mask)
{
	int offset = slice * sseu->ss_stride;

	memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

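/*
 * Per-platform helpers that read the power-gating ACK registers to
 * reconstruct which slices/subslices/EUs are currently powered up.
 */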
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}

static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

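/*
 * Broadwell only reports slice-level power gating here, so the
 * subslice and EU numbers are taken from the static fuse info,
 * minus any fused-off EUs.
 */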
static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
				   struct sseu_dev_info *sseu)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++)
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

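/* Dump one SSEU block: "Available" (fused) or "Enabled" (live) values. */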
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

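/*
 * debugfs read handler for i915_sseu_status: print the static SSEU
 * topology, then take a runtime-pm wakeref and report the live
 * power-gating status for the current platform.
 */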
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			bdw_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}

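/*
 * Opening i915_forcewake_user pins the GT awake and holds the uncore
 * forcewake until the file is closed again.
 */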
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}

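/* Drop the wakeref and forcewake taken in i915_forcewake_open(). */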
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_put(gt->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

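/* Read-only status nodes, registered in bulk via drm_debugfs_create_files(). */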
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

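/* Writable control files, each with dedicated file_operations. */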
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
};

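/* Register all of the above under the DRM minor's debugfs directory. */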
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	i915_debugfs_params(dev_priv);

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}