/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_pm_debugfs.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_rc6.h"
#include "gt/intel_reset.h"
#include "gt/intel_rps.h"
#include "gt/intel_sseu_debugfs.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_scheduler.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

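/* Dump PCH type, device/GT capabilities and the current module parameters. */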
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_gt_info_print(&i915->gt.info, &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915->params, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

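/*
 * Decode a GTT page-size mask: single sizes return a constant string,
 * mixed masks are formatted into the caller's buffer (or "M" if no
 * buffer is supplied).
 */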
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x-2] = '\0';

		return buf;
	}
}

static const char *stringify_vma_type(const struct i915_vma *vma)
{
	if (i915_vma_is_ggtt(vma))
		return "ggtt";

	if (i915_vma_is_dpt(vma))
		return "dpt";

	return "ppgtt";
}

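/*
 * Print a one-line summary of @obj: flags, size, domains, cache level and
 * every bound VMA, including any special GGTT/DPT view parameters.
 */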
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
			   stringify_vma_type(vma),
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].src_stride,
					   vma->ggtt_view.rotated.plane[0].dst_stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].src_stride,
					   vma->ggtt_view.rotated.plane[1].dst_stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].src_stride,
					   vma->ggtt_view.remapped.plane[0].dst_stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].src_stride,
					   vma->ggtt_view.remapped.plane[1].dst_stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (i915_gem_object_is_stolen(obj))
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");
}

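/* Summarise shrinkable/freed object counts and each memory region's usage. */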
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		intel_memory_region_debug(mr, &p);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
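/*
 * Common read() for i915_error_state and i915_gpu_info: stream the
 * captured coredump to userspace via a kernel bounce buffer.
 */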
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

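/*
 * Capture a fresh snapshot of the current GPU state across all engines;
 * it is released again in gpu_state_release().
 */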
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	drm_dbg(&error->i915->drm, "Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

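/* Frequency/RPS state is dumped by the common GT power-management helper. */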
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = &i915->gt;
	struct drm_printer p = drm_seq_file_printer(m);

	intel_gt_pm_frequency_dump(gt, &p);

	return 0;
}

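/*
 * Report the bit6 swizzle pattern used for X/Y tiling, plus the raw DRAM
 * configuration registers behind it on platforms that still swizzle.
 */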
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
	if (GRAPHICS_VER(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GRAPHICS_VER(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3_BW));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3_BW));
	} else if (GRAPHICS_VER(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (GRAPHICS_VER(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;

	seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.init_wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}

static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "GT awake? %s [%d], %llums\n",
		   yesno(i915->gt.awake),
		   atomic_read(&i915->gt.wakeref.count),
		   ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
	seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   i915->gt.clock_frequency,
		   i915->gt.clock_period_ns);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, i915)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		const struct i915_wa_list *wal = &engine->ctx_wa_list;
		const struct i915_wa *wa;
		unsigned int count;

		count = wal->count;
		if (!count)
			continue;

		seq_printf(m, "%s: Workarounds applied: %u\n",
			   engine->name, count);

		for (wa = wal->list; count--; wa++)
			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
				   i915_mmio_reg_offset(wa->reg),
				   wa->set, wa->clr);

		seq_printf(m, "\n");
	}

	return 0;
}

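/*
 * i915_wedged: reading reports whether the GT is terminally wedged;
 * writing triggers a GPU reset with the given engine mask.
 */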
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	int ret = intel_gt_terminally_wedged(&i915->gt);

	switch (ret) {
	case -EIO:
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");
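/*
 * Bitmask of caches accepted by the i915_gem_drop_caches control;
 * DROP_ALL combines every flag below for a full flush.
 */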
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU	BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	if (val & DROP_FREED)
		intel_gt_flush_buffer_pool(gt);

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = &i915->gt;

	return intel_sseu_status(m, gt);
}

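/*
 * While i915_forcewake_user is held open, the GT is kept awake and, on
 * gen6+, forcewake is asserted; both are dropped again on release.
 */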
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (GRAPHICS_VER(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	if (GRAPHICS_VER(i915) >= 6)
		intel_uncore_forcewake_user_put(&i915->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
};

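/* Create all i915 debugfs files under the primary DRM minor's debugfs root. */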
void i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	i915_debugfs_params(dev_priv);

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
}