/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "i915_guc_submission.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

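/*
 * Print one i915 module parameter as "i915.<name>=<value>". The type-string
 * comparisons are resolved at compile time, so an unhandled parameter type
 * triggers BUILD_BUG().
 */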
static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}

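/* Summarise device capabilities: gen, platform, PCH, feature flags and module parameters. */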
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

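/*
 * Render a mask of GTT page sizes as text. A single size returns a static
 * string; a mixed mask is assembled in the caller-supplied buffer.
 */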
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x-2] = '\0';

		return buf;
	}
}

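/*
 * Emit a one-line description of a GEM object: status flags, size, cache
 * domains, each VMA binding (with GGTT view details and fences), stolen
 * placement, last write engine and frontbuffer bits.
 */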
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

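/* List every object backed by stolen memory, sorted by stolen offset. */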
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

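/* Fold one object's size and binding state into the given file_stats. */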
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

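/* Aggregate the objects cached in every engine's batch-buffer pool. */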
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}

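/* Aggregate context state and ringbuffer objects across all clients. */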
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}

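/*
 * The main GEM accounting node: global object totals (bound, unbound,
 * purgeable, mapped, huge-page, display) followed by per-client statistics.
 */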
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->pin_display) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

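/* Walk the bound list; with node data set, show only pinned display objects. */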
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	bool show_pin_display_only = !!node->info_ent->data;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (show_pin_display_only && !obj->pin_display)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

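/* Dump the contents of each engine's batch-buffer pool cache lists. */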
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

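/* Print one request: global seqno, context/fence ids, priority and age. */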
static void print_request(struct seq_file *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}

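/* List the requests outstanding on each engine's timeline. */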
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_request *req;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv, id) {
		int count;

		count = 0;
		list_for_each_entry(req, &engine->timeline->requests, link)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->timeline->requests, link)
			print_request(m, req, "    ");

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

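/* Report an engine's current seqno and every task waiting on it. */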
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		i915_ring_seqno_info(m, engine);

	return 0;
}


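/* Dump the interrupt enable/identity/mask registers; the layout is per-platform. */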
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv, id) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}

939 940
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
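/* Stream a captured GPU error state into the user's buffer. */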
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;

	intel_runtime_pm_get(i915);
	gpu = i915_capture_gpu_state(i915);
	intel_runtime_pm_put(i915);
	if (!gpu)
		return -ENOMEM;

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_global_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");

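/*
 * Report GPU frequency (RPS) state. The register layout differs between
 * ILK, VLV/CHV and gen6+; gen6+ additionally exposes the RP up/down
 * evaluation intervals and thresholds.
 */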
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (INTEL_GEN(dev_priv) >= 9)
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   dev_priv->rps.pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

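/* Print INSTDONE, with per-slice/subslice detail where the hardware has it. */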
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

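/* Hangcheck state: per-engine seqnos, waiters, ACTHD and the last action. */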
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}

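/* Ironlake P-state and render standby (RSx) status. */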
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

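/* Show the wake reference count held on each forcewake domain. */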
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   i915->uncore.user_forcewake.count);

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

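/* RC6 and power-well status for Valleyview/Cherryview. */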
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

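/*
 * RC6 status for gen6+. GEN6_GT_CORE_STATUS must be read without holding
 * forcewake, hence the raw read and the accuracy warning below.
 */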
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0;

	forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int err;

	intel_runtime_pm_get(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = vlv_drpc_info(m);
	else if (INTEL_GEN(dev_priv) >= 6)
		err = gen6_drpc_info(m);
	else
		err = ironlake_drpc_info(m);

	intel_runtime_pm_put(dev_priv);

	return err;
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

1668 1669
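
/*
 * Reports FBC (framebuffer compression) support and whether it is
 * currently active; when disabled, prints the reason recorded in
 * fbc.no_fbc_reason. The "Compressing" line samples a gen-specific
 * status register to show whether scanout is being compressed.
 */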
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_FBC(dev_priv)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
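
/*
 * get/set pair backing the fbc_false_color debugfs attribute. Writing
 * a non-zero value sets FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL;
 * judging by the bit name, this makes the hardware render compressed
 * regions in a false color, as a visual debugging aid for FBC.
 */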
static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
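
/*
 * Reports whether panel self-refresh is enabled, probing the
 * generation-specific enable bit; gen9+ has no single global status
 * bit, so nothing is read there and "disabled" is reported.
 */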
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
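
/*
 * Dumps the GPU-to-ring/IA frequency table on LLC platforms: for each
 * GPU frequency in the software min..max range, the pcode mailbox is
 * queried (GEN6_PCODE_READ_MIN_FREQ_TABLE) for the effective CPU and
 * ring frequencies paired with it.
 */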
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq =
			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq =
			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      IS_CANNONLAKE(dev_priv) ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}
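
/*
 * Lists every framebuffer known to the device: the fbdev/fbcon one
 * first (when CONFIG_DRM_FBDEV_EMULATION is enabled), then all
 * user-created ones, each with size, depth, bpp, modifier, refcount
 * and a description of the backing GEM object.
 */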
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
		   ring->space, ring->head, ring->tail);
}
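
/*
 * Walks the device's context list, printing each context's hw_id, the
 * owning process (or "kernel"/"deleted"), the remap_slice flag, and
 * per-engine context-state object and ringbuffer details.
 */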
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			seq_putc(m, ce->initialised ? 'I' : 'i');
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void i915_dump_lrc_obj(struct seq_file *m,
			      struct i915_gem_context *ctx,
			      struct intel_engine_cs *engine)
{
	struct i915_vma *vma = ctx->engine[engine->id].state;
	struct page *page;
	int j;

	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);

	if (!vma) {
		seq_puts(m, "\tFake context\n");
		return;
	}

	if (vma->flags & I915_VMA_GLOBAL_BIND)
		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
			   i915_ggtt_offset(vma));

	if (i915_gem_object_pin_pages(vma->obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n\n");
		return;
	}

	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
	if (page) {
		u32 *reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m,
				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   j * 4,
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	i915_gem_object_unpin_pages(vma->obj);
	seq_putc(m, '\n');
}

static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	if (!i915_modparams.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link)
		for_each_engine(engine, dev_priv, id)
			i915_dump_lrc_obj(m, ctx, engine);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}

static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}

static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}

static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int count = 0;

	for_each_engine(engine, i915, id)
		count += intel_engine_has_waiter(engine);

	return count;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
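
/*
 * Summarizes RPS (frequency scaling) state: current/min/max/boost
 * frequencies, per-client boost counts, and, while requests are
 * active, the up/down autotuning event counters sampled under
 * forcewake.
 */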
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&dev_priv->rps.num_waiters));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));

	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&dev_priv->rps.boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    dev_priv->rps.enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(dev_priv->rps.power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   dev_priv->rps.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   dev_priv->rps.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}
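
/*
 * Dumps HuC firmware state: path, fetch/load status, wanted vs. found
 * versions and blob layout, then reads HUC_STATUS2 with a runtime-PM
 * wakeref held.
 */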
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;

	if (!HAS_HUC_UCODE(dev_priv))
		return 0;

	seq_puts(m, "HuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n", huc_fw->path);
	seq_printf(m, "\tfetch: %s\n",
		intel_uc_fw_status_repr(huc_fw->fetch_status));
	seq_printf(m, "\tload: %s\n",
		intel_uc_fw_status_repr(huc_fw->load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		huc_fw->major_ver_found, huc_fw->minor_ver_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		huc_fw->header_offset, huc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		huc_fw->ucode_offset, huc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		huc_fw->rsa_offset, huc_fw->rsa_size);

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
	u32 tmp, i;

	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		guc_fw->path);
	seq_printf(m, "\tfetch: %s\n",
		intel_uc_fw_status_repr(guc_fw->fetch_status));
	seq_printf(m, "\tload: %s\n",
		intel_uc_fw_status_repr(guc_fw->load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		guc_fw->major_ver_found, guc_fw->minor_ver_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		guc_fw->rsa_offset, guc_fw->rsa_size);

	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	seq_puts(m, "\nGuC logging stats:\n");

	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);

	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);

	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);

	seq_printf(m, "\tTotal flush interrupt count: %u\n",
		   guc->log.flush_interrupt_count);

	seq_printf(m, "\tCapture miss count: %u\n",
		   guc->log.capture_miss_count);
}

static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		client->doorbell_id, client->doorbell_offset);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
				submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}

static bool check_guc_submission(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!guc->execbuf_client) {
		seq_printf(m, "GuC submission %s\n",
			   HAS_GUC_SCHED(dev_priv) ?
			   "disabled" :
			   "not supported");
		return false;
	}

	return true;
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!check_guc_submission(m))
		return 0;

	seq_printf(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);

	i915_guc_log_info(m, dev_priv);

	/* Add more as required ... */

	return 0;
}

static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct i915_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!check_guc_submission(m))
		return 0;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
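
/*
 * Dumps the raw GuC log buffer (or the load-error log, when the
 * debugfs node's data flag is set) as rows of four 32-bit words,
 * keeping the backing object pinned in a WC mapping for the dump.
 */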
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data inaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}

static int i915_guc_log_control_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	*val = i915_modparams.guc_log_level;

	return 0;
}

static int i915_guc_log_control_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	int ret;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_guc_log_control(dev_priv, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
			i915_guc_log_control_get, i915_guc_log_control_set,
			"%lld\n");

static const char *psr2_live_status(u32 val)
{
	static const char * const live_status[] = {
		"IDLE",
		"CAPTURE",
		"CAPTURE_FS",
		"SLEEP",
		"BUFON_FW",
		"ML_UP",
		"SU_STANDBY",
		"FAST_SLEEP",
		"DEEP_SLEEP",
		"BUF_ON",
		"TG_ON"
	};

	val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
	if (val < ARRAY_SIZE(live_status))
		return live_status[val];

	return "unknown";
}
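
/*
 * Reports eDP PSR (panel self refresh) state: sink/source support and
 * the software enable/active flags, plus the hardware enable bit,
 * which lives per pipe on VLV/CHV and in the PSR1/PSR2 control
 * register on DDI platforms. The performance counter only exists on
 * HSW/BDW.
 */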
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev_priv)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support)
			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
		else
			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	} else {
		for_each_pipe(dev_priv, pipe) {
			enum transcoder cpu_transcoder =
				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain))
				continue;

			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;

			intel_display_power_put(dev_priv, power_domain);
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev_priv))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter
	 * SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	if (dev_priv->psr.psr2_support) {
		u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);

		seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
			   psr2, psr2_live_status(psr2));
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;

		if (!connector->base.state->best_encoder)
			continue;

		crtc = connector->base.state->crtc;
		if (!crtc->state->active)
			continue;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_unlock_all(dev);
	return ret;
}
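
/*
 * Reports GPU energy use in microjoules: the RAPL power-unit MSR
 * supplies the energy scale (2^-units J), which is then applied to
 * the counter read from MCH_SECP_NRG_STTS.
 */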
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->domains)
			seq_printf(m, "  %-23s %d\n",
				 intel_display_power_domain_str(power_domain),
				 power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
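
/*
 * Shows DMC/CSR firmware state: whether a payload is loaded, its path
 * and version, and the DC-state transition counters on the platforms
 * and firmware revisions (SKL 1.6+/KBL, BXT 1.4+) that expose them.
 */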
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}

static void intel_dp_mst_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
					intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}

static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}

static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}

static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}

static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
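
/*
 * Per-engine state dump: reports whether the GT is awake and the
 * global active-request count, then hands each engine to
 * intel_engine_dump() through a seq_file-backed drm_printer.
 */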
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct drm_printer p;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s\n",
		   yesno(dev_priv->gt.awake));
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	int num_rings = INTEL_INFO(dev_priv)->num_rings;
	enum intel_engine_id id;
	int j, ret;

	if (!i915_modparams.semaphores) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev_priv)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_engine(engine, dev_priv, id) {
			uint64_t offset;

			seq_printf(m, "%s\n", engine->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = id * I915_NUM_ENGINES + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = id + (j * I915_NUM_ENGINES);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_engine(engine, dev_priv, id)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(engine->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
			yesno(dev_priv->ipc_enabled));
	return 0;
}

static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}

static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}

static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
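
/*
 * Usage sketch (not part of the driver): with debugfs mounted in the
 * usual place and this GPU registered as DRM minor 0 (an assumption;
 * the index depends on the system), IPC can be inspected and toggled:
 *   cat /sys/kernel/debug/dri/0/i915_ipc_status
 *   echo 0 > /sys/kernel/debug/dri/0/i915_ipc_status
 */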

static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}

static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_active_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_data_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_type_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_type_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
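
/*
 * Rough usage sketch for the three DP compliance files (paths assume
 * DRM minor 0): a test harness reads i915_dp_test_type and
 * i915_dp_test_data to learn the requested test, then writes "1" to
 * i915_dp_test_active to arm the compliance path; the write handler
 * above deliberately treats any value other than 1 as inactive.
 */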

static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
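
/*
 * Usage sketch (path assumes DRM minor 0): each file takes exactly
 * num_levels space-separated uint16 values, matching the sscanf() in
 * wm_latency_write(), e.g. on a platform with eight levels:
 *   echo "2 2 4 6 8 10 12 14" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 * Reading the file back reports each level's latency in usec.
 */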

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, "Manually setting wedged to %llu", val);

	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
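
/*
 * Usage sketch (path assumes DRM minor 0): writing an engine mask
 * injects a hang on those engines and blocks until the reset handoff
 * completes, e.g.
 *   echo 0xf > /sys/kernel/debug/dri/0/i915_wedged
 * Reading the file reports whether the GPU is terminally wedged.
 */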

static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

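/*
 * Usage sketch: i915_ring_test_irq takes an engine mask (clamped to
 * the platform's ring_mask above) whose user interrupts are then
 * suppressed, letting test suites exercise the missed-irq fallbacks.
 */
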
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_FREED 0x10
#define DROP_SHRINK_ALL 0x20
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_gem_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_FREED) {
		synchronize_rcu();
		i915_gem_drain_freed_objects(dev_priv);
	}

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
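
/*
 * Usage sketch (path assumes DRM minor 0): the written value is a mask
 * of the DROP_* flags above; reading reports DROP_ALL (0x3f), so
 *   echo 0x3f > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 * idles the GPU, retires requests and shrinks bound, unbound and freed
 * object lists.
 */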

static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min ||
	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
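
/*
 * Usage sketch (paths assume DRM minor 0): both files take a frequency
 * in MHz, which intel_freq_opcode() translates to a hardware opcode and
 * the setters clamp against the hardware limits, e.g.
 *   echo 300 > /sys/kernel/debug/dri/0/i915_min_freq
 *   echo 1100 > /sys/kernel/debug/dri/0/i915_max_freq
 */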

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
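
/*
 * Usage sketch (gen6/gen7 only; path assumes DRM minor 0): a value of
 * 0-3 is written into the GEN6_MBC_SNPCR snoop-control field, e.g.
 *   echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */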

static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_GEN9_LP(dev_priv)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
			sseu->subslice_mask =
				INTEL_INFO(dev_priv)->sseu.subslice_mask;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask |= BIT(ss);
			}

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
}

static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		sseu->eu_per_subslice =
				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
		   sseu->subslice_mask);
	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
		   hweight8(sseu->subslice_mask));
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(i915);
	intel_runtime_pm_put(i915);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
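
/*
 * Usage sketch: keeping i915_forcewake_user open pins both a runtime PM
 * reference and user forcewake, so userspace register access sees an
 * awake GT; both references are dropped again on close.
 */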

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
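
/*
 * Usage sketch (path assumes DRM minor 0): the write handler above
 * accepts a decimal threshold or the literal string "reset", e.g.
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 * A threshold of 0 disables HPD storm detection entirely.
 */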

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
	{"i915_gem_stolen", i915_gem_stolen_list_info},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_control", &i915_guc_log_control_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	struct dentry *ent;
	int ret, i;

	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
				  minor->debugfs_root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	ret = intel_pipe_crc_create(minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ent = debugfs_create_file(i915_debugfs_files[i].name,
					  S_IRUGO | S_IWUSR,
					  minor->debugfs_root,
					  to_i915(minor->dev),
					  i915_debugfs_files[i].fops);
		if (!ent)
			return -ENOMEM;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
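
/*
 * Usage sketch: i915_dpcd is created per connector by
 * i915_debugfs_connector_add() below, so for a connected DP sink the
 * dump can be read with e.g.
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dpcd
 * where "dri/0/DP-1" is an assumption that depends on the system's
 * connector naming.
 */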

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}

static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}

static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);

	return 0;
}