/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include "intel_drv.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated, we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *)key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
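
/*
 * Example (a sketch, not taken from this file): wrapping a raw debugfs
 * file in a fake info node so that the drm debugfs cleanup path can find
 * and release it along with the regular info nodes.  The file name and
 * fops ("i915_foo", i915_foo_fops) are hypothetical:
 *
 *	struct dentry *ent;
 *
 *	ent = debugfs_create_file("i915_foo", S_IRUSR,
 *				  minor->debugfs_root, to_i915(minor->dev),
 *				  &i915_foo_fops);
 *	if (!ent)
 *		return -ENOMEM;
 *	return drm_add_fake_info_node(minor, ent, &i915_foo_fops);
 */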

static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else
		BUILD_BUG();
}
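
/*
 * Note: the type dispatch above is resolved at compile time.
 * __builtin_strcmp() on string literals folds to a constant, so each
 * expansion of seq_print_param() keeps exactly one seq_printf() branch,
 * and BUILD_BUG() only fires (at build time) for a parameter type that no
 * branch handles.  For a hypothetical bool parameter "foo" (name used
 * purely for illustration), PRINT_PARAM(bool, foo) effectively becomes:
 *
 *	seq_printf(m, "i915.%s=%s\n", "foo",
 *		   yesno(*(const bool *)&i915.foo));
 */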

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

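/*
 * Flag legend for describe_obj(), one character per get_*_flag() helper
 * above:
 *	'*'	object is active on the GPU
 *	'p'	pinned for display (scanout)
 *	'X'/'Y'	tiling mode (' ' when untiled)
 *	'g'	on the userfault list, i.e. has a live GGTT mmapping
 *	'M'	has a kernel mapping (obj->mm.mapping is set)
 */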
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}
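
/*
 * list_sort() expects the usual comparator contract: negative if A sorts
 * before B, positive if after, zero if equal.  Here that yields the stolen
 * objects in order of increasing offset within stolen memory.
 */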

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
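
/*
 * per_file_stats() is written as an idr iterator callback.  A sketch of
 * how it is driven for one client, matching its use in
 * i915_gem_object_info() below:
 *
 *	struct file_stats stats;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	stats.file_priv = file->driver_priv;
 *	spin_lock(&file->table_lock);
 *	idr_for_each(&file->object_idr, per_file_stats, &stats);
 *	spin_unlock(&file->table_lock);
 */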

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count;
	u64 size, mapped_size, purgeable_size, dpy_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->pin_display) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		mutex_lock(&dev->struct_mutex);
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_list);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	bool show_pin_display_only = !!node->info_ent->data;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (show_pin_display_only && !obj->pin_display)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->flip_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 pending;
			u32 addr;

			pending = atomic_read(&work->pending);
			if (pending) {
				seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *engine = work->flip_queued_req->engine;

				seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
					   engine->name,
					   work->flip_queued_req->global_seqno,
					   intel_engine_last_submit(engine),
					   intel_engine_get_seqno(engine),
					   i915_gem_request_completed(work->flip_queued_req));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   intel_crtc_get_vblank_counter(crtc));
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_GEN(dev_priv) >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void print_request(struct seq_file *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}
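
/*
 * print_request() emits one line per request of the form:
 *
 *	<prefix><global_seqno> [<ctx hw_id>:<fence seqno>] prio=<prio> @ <age>ms: <timeline name>
 *
 * e.g. "    5ac [e:24] prio=0 @ 16ms: ..." (values purely illustrative).
 */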

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_request *req;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv, id) {
		int count;

		count = 0;
		list_for_each_entry(req, &engine->timeline->requests, link)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->timeline->requests, link)
			print_request(m, req, "    ");

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock_irq(&b->lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->lock);
}
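
/*
 * Each engine's intel_breadcrumbs keeps its waiters in a seqno-ordered
 * rbtree, so the dump above lists, per engine, the breadcrumb the
 * hardware has reached followed by every task still waiting and the
 * seqno it is waiting for.
 */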

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		i915_ring_seqno_info(m, engine);

	return 0;
}


static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv, id) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_destroy_error_state(error_priv->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->i915 = dev_priv;

	i915_error_state_get(&dev_priv->drm, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, error_priv->i915,
					count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
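
/*
 * These fops are hooked up (outside this excerpt) via debugfs_create_file(),
 * so that reading the node streams the captured error state, writing to it
 * clears the capture, and open/release manage the reference on the error
 * state itself.
 */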

#endif

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_global_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev_priv))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (IS_GEN9(dev_priv))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_printf(m, "Wedged\n");
	if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
		seq_printf(m, "Reset in progress\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_printf(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_printf(m, "struct_mutex blocked for reset\n");

	if (!i915.enable_hangcheck) {
		seq_printf(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		spin_lock_irq(&b->lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_drpc_info(m);
	else if (INTEL_GEN(dev_priv) >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

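/*
 * Frontbuffer tracking feeds FBC/PSR/DRRS: busy_bits mark frontbuffers
 * with CPU writes outstanding, flip_bits mark flips still in flight.
 */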
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_FBC(dev_priv)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
		uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
				BDW_FBC_COMPRESSION_MASK :
				IVB_FBC_COMPRESSION_MASK;
		seq_printf(m, "Compressing: %s\n",
			   yesno(I915_READ(FBC_STATUS2) & mask));
	}

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

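/*
 * FBC false-color debug knob: setting FBC_CTL_FALSE_COLOR makes the
 * hardware draw compressed segments in a solid colour, so compression
 * coverage can be verified visually.
 */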
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

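/* Self-refresh status is reported by a different register on each platform. */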
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

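/*
 * Dump the pcode-provided table mapping each GPU frequency to the
 * effective CPU and ring frequencies; only meaningful on parts where
 * the GPU shares the last-level cache with the CPU.
 */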
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	if (IS_GEN9_BC(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq =
			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq =
			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ring->space, ring->head, ring->tail,
		   ring->last_retired_head);
}

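/* Walk every GEM context and summarise its per-engine state and ring. */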
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			seq_putc(m, ce->initialised ? 'I' : 'i');
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void i915_dump_lrc_obj(struct seq_file *m,
			      struct i915_gem_context *ctx,
			      struct intel_engine_cs *engine)
{
	struct i915_vma *vma = ctx->engine[engine->id].state;
	struct page *page;
	int j;

	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);

	if (!vma) {
		seq_puts(m, "\tFake context\n");
		return;
	}

	if (vma->flags & I915_VMA_GLOBAL_BIND)
		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
			   i915_ggtt_offset(vma));

	if (i915_gem_object_pin_pages(vma->obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n\n");
		return;
	}

	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
	if (page) {
		u32 *reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m,
				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   j * 4,
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	i915_gem_object_unpin_pages(vma->obj);
	seq_putc(m, '\n');
}

static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	if (!i915.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link)
		for_each_engine(engine, dev_priv, id)
			i915_dump_lrc_obj(m, ctx, engine);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}

static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));

			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}

static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}

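/* Count engines that currently have at least one breadcrumb waiter. */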
static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int count = 0;

	for_each_engine(engine, i915, id)
		count += intel_engine_has_waiter(engine);

	return count;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));

	mutex_lock(&dev->filelist_mutex);
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    dev_priv->rps.enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(dev_priv->rps.power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   100 * rpup / rpupei,
			   dev_priv->rps.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   100 * rpdown / rpdownei,
			   dev_priv->rps.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}

static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;

	if (!HAS_HUC_UCODE(dev_priv))
		return 0;

	seq_puts(m, "HuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n", huc_fw->path);
	seq_printf(m, "\tfetch: %s\n",
		intel_uc_fw_status_repr(huc_fw->fetch_status));
	seq_printf(m, "\tload: %s\n",
		intel_uc_fw_status_repr(huc_fw->load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		huc_fw->major_ver_found, huc_fw->minor_ver_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		huc_fw->header_offset, huc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		huc_fw->ucode_offset, huc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		huc_fw->rsa_offset, huc_fw->rsa_size);

	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
	u32 tmp, i;

	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		guc_fw->path);
	seq_printf(m, "\tfetch: %s\n",
		intel_uc_fw_status_repr(guc_fw->fetch_status));
	seq_printf(m, "\tload: %s\n",
		intel_uc_fw_status_repr(guc_fw->load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		guc_fw->major_ver_found, guc_fw->minor_ver_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		guc_fw->rsa_offset, guc_fw->rsa_size);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	return 0;
}

static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	seq_puts(m, "\nGuC logging stats:\n");

	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);

	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);

	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);

	seq_printf(m, "\tTotal flush interrupt count: %u\n",
		   guc->log.flush_interrupt_count);

	seq_printf(m, "\tCapture miss count: %u\n",
		   guc->log.capture_miss_count);
}

static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
		client->priority, client->ctx_index, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
		client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
		client->wq_size, client->wq_offset, client->wq_tail);

	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
	seq_printf(m, "\tLast submission result: %d\n", client->retcode);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
				submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 total;

	if (!guc->execbuf_client) {
		seq_printf(m, "GuC submission %s\n",
			   HAS_GUC_SCHED(dev_priv) ?
			   "disabled" :
			   "not supported");
		return 0;
	}

	seq_printf(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);

	seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
	seq_printf(m, "GuC action failure count: %u\n", guc->action_fail);
	seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd);
	seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status);
	seq_printf(m, "GuC last action error code: %d\n", guc->action_err);

	total = 0;
	seq_printf(m, "\nGuC submissions:\n");
	for_each_engine(engine, dev_priv, id) {
		u64 submissions = guc->submissions[id];
		total += submissions;
		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
			engine->name, submissions, guc->last_seqno[id]);
	}
	seq_printf(m, "\t%s: %llu\n", "Total", total);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);

	i915_guc_log_info(m, dev_priv);

	/* Add more as required ... */

	return 0;
}

static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_i915_gem_object *obj;
	int i = 0, pg;

	if (!dev_priv->guc.log.vma)
		return 0;

	obj = dev_priv->guc.log.vma->obj;
	for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
		u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(log + i), *(log + i + 1),
				   *(log + i + 2), *(log + i + 3));

		kunmap_atomic(log);
	}

	seq_putc(m, '\n');

	return 0;
}

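/*
 * i915.guc_log_level is exposed read/write through debugfs; writes are
 * forwarded to the GuC via i915_guc_log_control() with the device awake
 * and struct_mutex held.
 */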
static int i915_guc_log_control_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	*val = i915.guc_log_level;

	return 0;
}

static int i915_guc_log_control_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_guc_log_control(dev_priv, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);
	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
			i915_guc_log_control_get, i915_guc_log_control_set,
			"%lld\n");

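/* Decode the PSR2 status/state machine field into a printable name. */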
static const char *psr2_live_status(u32 val)
{
	static const char * const live_status[] = {
		"IDLE",
		"CAPTURE",
		"CAPTURE_FS",
		"SLEEP",
		"BUFON_FW",
		"ML_UP",
		"SU_STANDBY",
		"FAST_SLEEP",
		"DEEP_SLEEP",
		"BUF_ON",
		"TG_ON"
	};

	val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
	if (val < ARRAY_SIZE(live_status))
		return live_status[val];

	return "unknown";
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev_priv)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support)
			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
		else
			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	} else {
		for_each_pipe(dev_priv, pipe) {
			enum transcoder cpu_transcoder =
				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain))
				continue;

			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;

			intel_display_power_put(dev_priv, power_domain);
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev_priv))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter.
	 * SKL+ Perf counter is reset to 0 every time DC state is entered.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	if (dev_priv->psr.psr2_support) {
		u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);

		seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
			   psr2, psr2_live_status(psr2));
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	for_each_intel_connector(dev, connector) {
		struct drm_crtc *crtc;

		if (!connector->base.state->best_encoder)
			continue;

		crtc = connector->base.state->crtc;
		if (!crtc->state->active)
			continue;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}

static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u64 power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->domains)
			seq_printf(m, "  %-23s %d\n",
				 intel_display_power_domain_str(power_domain),
				 power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}

static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}

static void intel_dp_mst_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
					intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}

static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

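/* 845g/865g use a different cursor control bit layout than later parts. */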
static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
{
	u32 state;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	return state;
}

static bool cursor_position(struct drm_i915_private *dev_priv,
			    int pipe, int *x, int *y)
{
	u32 pos;

	pos = I915_READ(CURPOS(pipe));

	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
		*x = -*x;

	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
		*y = -*y;

	return cursor_active(dev_priv, pipe);
}

static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}

static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}

static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}

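/*
 * Full display topology dump: every CRTC with its cursor, scalers and
 * planes, followed by every connector.
 */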
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		struct intel_crtc_state *pipe_config;
		int x, y;

		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev_priv, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
				   x, y, crtc->base.cursor->state->crtc_w,
				   crtc->base.cursor->state->crtc_h,
				   crtc->cursor_addr, yesno(active));
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

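/*
 * Per-engine snapshot: seqno and hangcheck state, ring registers,
 * execlist ports and pending queue (or PPGTT registers on legacy
 * submission), and the breadcrumb waiters.
 */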
3252 3253 3254 3255
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
3256
	enum intel_engine_id id;
3257

3258 3259
	intel_runtime_pm_get(dev_priv);

3260
	for_each_engine(engine, dev_priv, id) {
3261 3262 3263 3264 3265 3266
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct drm_i915_gem_request *rq;
		struct rb_node *rb;
		u64 addr;

		seq_printf(m, "%s\n", engine->name);
3267
		seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
3268
			   intel_engine_get_seqno(engine),
3269
			   intel_engine_last_submit(engine),
3270
			   engine->hangcheck.seqno,
3271
			   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
3272 3273 3274 3275 3276

		rcu_read_lock();

		seq_printf(m, "\tRequests:\n");

3277 3278 3279
		rq = list_first_entry(&engine->timeline->requests,
				      struct drm_i915_gem_request, link);
		if (&rq->link != &engine->timeline->requests)
3280 3281
			print_request(m, rq, "\t\tfirst  ");

3282 3283 3284
		rq = list_last_entry(&engine->timeline->requests,
				     struct drm_i915_gem_request, link);
		if (&rq->link != &engine->timeline->requests)
3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320
			print_request(m, rq, "\t\tlast   ");

		rq = i915_gem_find_active_request(engine);
		if (rq) {
			print_request(m, rq, "\t\tactive ");
			seq_printf(m,
				   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
				   rq->head, rq->postfix, rq->tail,
				   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
				   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
		}

		seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
			   I915_READ(RING_START(engine->mmio_base)),
			   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
		seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
			   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
			   rq ? rq->ring->head : 0);
		seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
			   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
			   rq ? rq->ring->tail : 0);
		seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
			   I915_READ(RING_CTL(engine->mmio_base)),
			   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");

		rcu_read_unlock();

		addr = intel_engine_get_active_head(engine);
		seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
			   upper_32_bits(addr), lower_32_bits(addr));
		addr = intel_engine_get_last_batch_head(engine);
		seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
			   upper_32_bits(addr), lower_32_bits(addr));

		if (i915.enable_execlists) {
			u32 ptr, read, write;
3321
			struct rb_node *rb;
3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348

			seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
				   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
				   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

			ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
			read = GEN8_CSB_READ_PTR(ptr);
			write = GEN8_CSB_WRITE_PTR(ptr);
			seq_printf(m, "\tExeclist CSB read %d, write %d\n",
				   read, write);
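			/*
			 * Normalise the hardware read/write pointers so we
			 * can walk every CSB entry submitted since the last
			 * read, wrapping modulo GEN8_CSB_ENTRIES.
			 */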
			if (read >= GEN8_CSB_ENTRIES)
				read = 0;
			if (write >= GEN8_CSB_ENTRIES)
				write = 0;
			if (read > write)
				write += GEN8_CSB_ENTRIES;
			while (read < write) {
				unsigned int idx = ++read % GEN8_CSB_ENTRIES;

				seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
					   idx,
					   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
					   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
			}

			rcu_read_lock();
			rq = READ_ONCE(engine->execlist_port[0].request);
			if (rq) {
				seq_printf(m, "\t\tELSP[0] count=%d, ",
					   engine->execlist_port[0].count);
				print_request(m, rq, "rq: ");
			} else {
				seq_printf(m, "\t\tELSP[0] idle\n");
			}
			rq = READ_ONCE(engine->execlist_port[1].request);
			if (rq) {
				seq_printf(m, "\t\tELSP[1] count=%d, ",
					   engine->execlist_port[1].count);
				print_request(m, rq, "rq: ");
			} else {
				seq_printf(m, "\t\tELSP[1] idle\n");
			}
			rcu_read_unlock();

			spin_lock_irq(&engine->timeline->lock);
			for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
				rq = rb_entry(rb, typeof(*rq), priotree.node);
				print_request(m, rq, "\t\tQ ");
			}
			spin_unlock_irq(&engine->timeline->lock);
		} else if (INTEL_GEN(dev_priv) > 6) {
			seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
				   I915_READ(RING_PP_DIR_BASE(engine)));
			seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
				   I915_READ(RING_PP_DIR_BASE_READ(engine)));
			seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
				   I915_READ(RING_PP_DIR_DCLV(engine)));
		}

		spin_lock_irq(&b->lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->lock);

		seq_puts(m, "\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

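/*
 * i915_semaphore_status: on BDW the last signalled/waited inter-engine
 * seqnos live in a single semaphore page, so dump them from there; on
 * older gens read the per-engine mailbox registers directly.
 */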
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	int num_rings = INTEL_INFO(dev_priv)->num_rings;
	enum intel_engine_id id;
	int j, ret;

	if (!i915.semaphores) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev_priv)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_engine(engine, dev_priv, id) {
			uint64_t offset;

			seq_printf(m, "%s\n", engine->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = id * I915_NUM_ENGINES + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = id + (j * I915_NUM_ENGINES);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_engine(engine, dev_priv, id)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(engine->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

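/*
 * i915_wa_registers: read back every workaround register and compare the
 * masked value against what was programmed, flagging any that have been
 * lost (e.g. across a reset or power transition).
 */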
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}

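/*
 * drrs_status_per_crtc: report the VBT DRRS capability and, when seamless
 * DRRS is supported on this crtc, the current refresh-rate state and the
 * frontbuffer bits keeping it out of the downclocked mode.
 */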
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;

	drm_for_each_connector(connector, dev) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported: No");
	}
	seq_puts(m, "\n");
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;

	drm_modeset_lock_all(dev);
	drm_for_each_connector(connector, dev) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_modeset_unlock_all(dev);
	return 0;
}

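/*
 * i915_dp_test_active: used by DP compliance testing. Writing "1" arms the
 * compliance test handling on every connected DP connector; any other value
 * disarms it. Reads report the current armed state per connector.
 */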
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct list_head *connector_list;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	connector_list = &dev->mode_config.connector_list;

	if (len == 0)
		return 0;

	input_buffer = kmalloc(len + 1, GFP_KERNEL);
	if (!input_buffer)
		return -ENOMEM;

	if (copy_from_user(input_buffer, ubuf, len)) {
		status = -EFAULT;
		goto out;
	}

	input_buffer[len] = '\0';
	DRM_DEBUG_DRIVER("Copied %u bytes from user\n", (unsigned int)len);

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				goto out;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
out:
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_active_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}

	return 0;
}
static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_data_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}

	return 0;
}

static int i915_displayport_test_type_open(struct inode *inode,
				       struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_type_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

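/*
 * wm_latency_show: print one line per watermark level, scaling the raw
 * latency values (whose units vary by platform and level, see below) to
 * tenths of a microsecond for display.
 */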
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5)
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
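	/*
	 * Expects space-separated latency values, one per watermark level
	 * (e.g. "echo 12 12 12 12 12 > i915_pri_wm_latency"); the count
	 * must match num_levels for this platform, and the values are
	 * stored raw, in the same units as the hardware latency tables.
	 */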
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}


static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;

	i915_handle_error(dev_priv, val,
			  "Manually setting wedged to %llu", val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	val &= INTEL_INFO(dev_priv)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
	dev_priv->gpu_error.test_irq_rings = val;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

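/*
 * i915_gem_drop_caches: bitmask of caches to drop, written as hex, e.g.
 * "echo 0x1f > i915_gem_drop_caches" to drop everything (DROP_ALL).
 */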
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_FREED 0x10
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev_priv);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	if (val & DROP_FREED) {
		synchronize_rcu();
		i915_gem_drain_freed_objects(dev_priv);
	}

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
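/*
 * i915_max_freq: read/write the RPS maximum frequency softlimit. Values
 * are in MHz; writes are converted to hardware units via intel_freq_opcode()
 * and rejected with -EINVAL if outside the hardware range or below the
 * current minimum softlimit.
 */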
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min ||
	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

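/*
 * cherryview_sseu_device_status: infer the enabled subslices and EUs from
 * the power-gating signal registers; CHV has a single slice and at most
 * two subslices.
 */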
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_GEN9_LP(dev_priv)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask =
				INTEL_INFO(dev_priv)->sseu.subslice_mask;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;
				sseu->subslice_mask |= BIT(ss);
			}
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
}

static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		sseu->eu_per_subslice =
				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
		   sseu->subslice_mask);
	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
		   hweight8(sseu->subslice_mask));
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}

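/*
 * i915_forcewake_user: holding this file open keeps the GT awake; opening
 * takes a runtime pm reference and grabs all forcewake domains, and the
 * release drops them again.
 */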
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 6)
		return 0;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, to_i915(minor->dev),
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

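/*
 * These nodes need their own file_operations (for writes and custom
 * formatting) rather than the read-only drm_info_list helpers above.
 */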
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
#endif
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_control", &i915_guc_log_control_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = intel_pipe_crc_create(minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
				 1, minor);

	intel_pipe_crc_cleanup(minor);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}

static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}

static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehands. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);

	return 0;
}