/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

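/*
 * Top-level capability dump: print the device generation, the PCH type
 * and every feature flag in intel_device_info, one per line.
 */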
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

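/*
 * Single-character status flags used by describe_obj() below:
 * '*' active, 'p' pinned for display, 'X'/'Y' tiling, 'g' bound in the
 * global GTT, 'M' has a kernel mapping.
 */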
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mapping ? 'M' : ' ';
}

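/* Sum the sizes of all GGTT VMA nodes currently allocated for @obj. */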
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

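/*
 * Print a one-line summary of a GEM object: status flags, size,
 * read/write domains, per-engine read seqnos, cache level and the
 * offset of every allocated VMA. Caller must hold struct_mutex.
 */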
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;
	enum intel_engine_id id;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain);
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "%x ",
			   i915_gem_active_get_seqno(&obj->last_read[id],
						     &obj->base.dev->struct_mutex));
	seq_printf(m, "] %x %x%s%s%s",
		   i915_gem_active_get_seqno(&obj->last_write,
					     &obj->base.dev->struct_mutex),
		   i915_gem_active_get_seqno(&obj->last_fence,
					     &obj->base.dev->struct_mutex),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (i915_vma_is_ggtt(vma))
			seq_printf(m, ", type: %u", vma->ggtt_view.type);
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_display || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_display)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}

	engine = i915_gem_active_get_engine(&obj->last_write,
					    &obj->base.dev->struct_mutex);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

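/* list_sort() comparator: order objects by their start offset in stolen memory. */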
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

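/* List every object backed by stolen memory, sorted by stolen offset. */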
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

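/*
 * Per-client accumulator for object statistics, filled in by
 * per_file_stats() via idr_for_each().
 */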
struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

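/* idr_for_each() callback: add one object's sizes to the matching buckets. */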
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}

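/* Emit one summary line for a file_stats accumulator, skipping empty ones. */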
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

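/* Aggregate the batch-pool objects of all engines into a single stats line. */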
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

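/* Accumulate the context image and ringbuffer objects of one context. */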
static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->obj, data);
	}

	return 0;
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev_priv->drm.struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}

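/*
 * Global memory-usage summary: object totals, bound/unbound, purgeable,
 * mapped and display-pinned breakdowns, plus per-client statistics.
 */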
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count;
	u64 size, mapped_size, purgeable_size, dpy_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size;
		++count;

		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		size += obj->base.size;
		++count;

		if (obj->pin_display) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

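/*
 * List every bound object, or only the display-pinned ones when the
 * table entry's data pointer is non-NULL.
 */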
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool show_pin_display_only = !!data;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (show_pin_display_only && !obj->pin_display)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

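/*
 * Show pending page-flip state for every CRTC: the queued request,
 * vblank counters and the current/new scanout addresses.
 */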
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->flip_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 pending;
			u32 addr;

			pending = atomic_read(&work->pending);
			if (pending) {
				seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
					   engine->name,
					   i915_gem_request_get_seqno(work->flip_queued_req),
					   dev_priv->next_seqno,
					   intel_engine_get_seqno(engine),
					   i915_gem_request_completed(work->flip_queued_req));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   intel_crtc_get_vblank_counter(crtc));
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

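/* Dump the contents of each engine's batch-buffer pool, bucket by bucket. */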
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

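/*
 * List every outstanding request on each engine together with its
 * seqno, age in jiffies and the submitting task.
 */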
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *req;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv) {
		int count;

		count = 0;
		list_for_each_entry(req, &engine->request_list, link)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->request_list, link) {
			struct task_struct *task;

			rcu_read_lock();
			task = NULL;
			if (req->pid)
				task = pid_task(req->pid, PIDTYPE_PID);
			seq_printf(m, "    %x @ %d: %s [%d]\n",
				   req->fence.seqno,
				   (int) (jiffies - req->emitted_jiffies),
				   task ? task->comm : "<unknown>",
				   task ? task->pid : -1);
			rcu_read_unlock();
		}

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

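/*
 * Print the engine's current seqno followed by every task currently
 * waiting on a breadcrumb, taken from the waiters rbtree.
 */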
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock(&b->lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock(&b->lock);
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv)
		i915_ring_seqno_info(m, engine);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

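/* Dump the interrupt register state, laid out per platform IRQ topology. */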

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

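/* Describe each fence register and the object (if any) pinned into it. */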
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

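/* Hex dump of the first kilobyte of the engine's hardware status page. */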
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	const u32 *hws;
	int i;

	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
	hws = engine->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

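/*
 * i915_error_state_fops: reading dumps the last captured GPU error
 * state through a drm_i915_error_state_buf; writing clears it.
 */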
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

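/* i915_next_seqno_fops: debugfs read/write access to the next seqno to be allocated. */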
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_BROXTON(dev)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (IS_GEN9(dev))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

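/*
 * Compare each engine's sampled ACTHD/seqno against the values recorded
 * by the hangcheck timer, to show how far the GPU has progressed.
 */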
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	u32 instdone[I915_NUM_INSTDONE_REG];
	enum intel_engine_id id;
	int j;

	if (!i915.enable_hangcheck) {
		seq_printf(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine_id(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	i915_get_extra_instdone(dev_priv, instdone);

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_engine_id(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno,
			   seqno[id],
			   engine->last_submitted_seqno);
		seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)));
		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x", instdone[j]);

			seq_puts(m, "\n\tinstdone accu =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x",
					   engine->hangcheck.instdone[j]);

			seq_puts(m, "\n");
		}
	}

	return 0;
}

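/* Ironlake render-standby (DRPC) status: RS state machine, P-states and voltages. */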
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

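/* Report the wake_count of every forcewake domain under the uncore lock. */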
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore_forcewake_domain *fw_domain;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}

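/* Valleyview/Cherryview flavour of the RC6/DRPC status dump. */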
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}

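/*
 * gen6+ RC6 state dump: RP/RC control registers, residency counters,
 * power-well status and the RC6 voltage levels read back from PCODE.
 */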
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_INFO(dev)->gen >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_INFO(dev)->gen >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_INFO(dev)->gen >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return vlv_drpc_info(m);
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

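/* Report whether framebuffer compression is active and, if not, why. */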
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (INTEL_INFO(dev_priv)->gen >= 7)
		seq_printf(m, "Compressing: %s\n",
			   yesno(I915_READ(FBC_STATUS2) &
				 FBC_COMPRESSION_MASK));

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

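/*
 * i915_fbc_fc_fops toggles FBC false-colour mode (gen7+), a hardware
 * debug aid that draws compressed framebuffer regions in a distinct
 * colour so compression coverage is visible on screen.
 */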
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 reg;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
		 IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_CORE_RING_FREQ(dev)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq =
			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq =
			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
				(IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
				 GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_opregion *opregion = &dev_priv->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (to_i915(dev)->fbdev) {
		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.depth,
			   fbdev_fb->base.bits_per_pixel,
			   fbdev_fb->base.modifier[0],
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   fb->base.modifier[0],
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

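/* One-line summary of a context's ringbuffer: free space plus head/tail. */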
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ring->space, ring->head, ring->tail,
		   ring->last_retired_head);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else if (ctx->file_priv) {
			struct pid *pid = ctx->file_priv->file->pid;
			struct task_struct *task;

			task = get_pid_task(pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			seq_putc(m, ce->initialised ? 'I' : 'i');
			if (ce->state)
				describe_obj(m, ce->state);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

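/*
 * Dump the logical ring context (LRC) image for one engine: the first
 * 0x600 bytes of the register state page, four dwords per line.
 */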
static void i915_dump_lrc_obj(struct seq_file *m,
			      struct i915_gem_context *ctx,
			      struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
	struct page *page;
	uint32_t *reg_state;
	int j;
	unsigned long ggtt_offset = 0;

	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);

	if (ctx_obj == NULL) {
		seq_puts(m, "\tNot allocated\n");
		return;
	}

	if (!i915_gem_obj_ggtt_bound(ctx_obj))
		seq_puts(m, "\tNot bound in GGTT\n");
	else
		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);

	if (i915_gem_object_get_pages(ctx_obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n");
		return;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	if (!WARN_ON(page == NULL)) {
		reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   ggtt_offset + 4096 + (j * 4),
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	seq_putc(m, '\n');
}

static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	int ret;

	if (!i915.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link)
		for_each_engine(engine, dev_priv)
			i915_dump_lrc_obj(m, ctx, engine);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_execlists(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status;
	u32 ctx_id;
	struct list_head *cursor;
	int i, ret;

	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv) {
		struct drm_i915_gem_request *head_req = NULL;
		int count = 0;

		seq_printf(m, "%s\n", engine->name);

		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
			   status, ctx_id);

		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);

		read_pointer = engine->next_context_status_buffer;
		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
		if (read_pointer > write_pointer)
			write_pointer += GEN8_CSB_ENTRIES;
		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
			   read_pointer, write_pointer);

		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));

			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
				   i, status, ctx_id);
		}

		spin_lock_bh(&engine->execlist_lock);
		list_for_each(cursor, &engine->execlist_queue)
			count++;
		head_req = list_first_entry_or_null(&engine->execlist_queue,
						    struct drm_i915_gem_request,
						    execlist_link);
		spin_unlock_bh(&engine->execlist_lock);

		seq_printf(m, "\t%d requests in queue\n", count);
		if (head_req) {
			seq_printf(m, "\tHead request context: %u\n",
				   head_req->ctx->hw_id);
			seq_printf(m, "\tHead request tail: %u\n",
				   head_req->tail);
		}

		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

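/* Report the bit-6 swizzle mode along with the registers it derives from. */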
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_INFO(dev)->gen >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}

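/*
 * On gen8+ each engine carries four PDP registers pointing at the page
 * directories of the aliasing PPGTT; dump them per engine.
 */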
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}

static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_file *file;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_unlock;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}
out_unlock:
	mutex_unlock(&dev->filelist_mutex);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

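/* Count engines that currently have a waiter blocked on an interrupt. */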
static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	int count = 0;

	for_each_engine(engine, i915)
		count += intel_engine_has_waiter(engine);

	return count;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %s [%x]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));

	mutex_lock(&dev->filelist_mutex);
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    dev_priv->rps.enabled &&
	    dev_priv->gt.active_engines) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(dev_priv->rps.power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   100 * rpup / rpupei,
			   dev_priv->rps.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   100 * rpdown / rpdownei,
			   dev_priv->rps.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	u32 tmp, i;

	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		guc_fw->guc_fw_path);
	seq_printf(m, "\tfetch: %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
	seq_printf(m, "\tload: %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		guc_fw->rsa_offset, guc_fw->rsa_size);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	return 0;
}

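/*
 * Summarize one GuC client: priority, doorbell, work queue state and
 * per-engine submission counts.
 */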
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
		client->priority, client->ctx_index, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
		client->doorbell_id, client->doorbell_offset, client->cookie);
	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
		client->wq_size, client->wq_offset, client->wq_tail);

	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
	seq_printf(m, "\tLast submission result: %d\n", client->retcode);

	for_each_engine_id(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
				submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_guc guc;
	struct i915_guc_client client = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 total = 0;

	if (!HAS_GUC_SCHED(dev_priv))
		return 0;

	if (mutex_lock_interruptible(&dev->struct_mutex))
		return 0;

	/* Take a local copy of the GuC data, so we can dump it at leisure */
	guc = dev_priv->guc;
	if (guc.execbuf_client)
		client = *guc.execbuf_client;

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);

	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);

	seq_printf(m, "\nGuC submissions:\n");
2552 2553 2554
	for_each_engine_id(engine, dev_priv, id) {
		u64 submissions = guc.submissions[id];
		total += submissions;
2555
		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
2556
			engine->name, submissions, guc.last_seqno[id]);
2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567
	}
	seq_printf(m, "\t%s: %llu\n", "Total", total);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
	i915_guc_client_info(m, dev_priv, &client);

	/* Add more as required ... */

	return 0;
}

static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
	u32 *log;
	int i = 0, pg;

	if (!log_obj)
		return 0;

	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(log + i), *(log + i + 1),
				   *(log + i + 2), *(log + i + 3));

		kunmap_atomic(log);
	}

	seq_putc(m, '\n');

	return 0;
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev))
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	else {
		for_each_pipe(dev_priv, pipe) {
			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no performance counter.
	 * On SKL+ the performance counter is reset to 0 every time a DC
	 * state is entered.
	 */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	for_each_intel_connector(dev, connector) {
		struct drm_crtc *crtc;

		if (!connector->base.state->best_encoder)
			continue;

		crtc = connector->base.state->crtc;
		if (!crtc->state->active)
			continue;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}

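/*
 * Energy is reported in units derived from MSR_RAPL_POWER_UNIT (bits
 * 12:8 hold the energy-unit exponent); scale the GPU energy status
 * register by that unit to yield microjoules.
 */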
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev->dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(dev_priv->drm.pdev->current_state),
		   dev_priv->drm.pdev->current_state);

	return 0;
}

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				 intel_display_power_domain_str(power_domain),
				 power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}

static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_csr *csr;

	if (!HAS_CSR(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}

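/* Print one drm_display_mode per line, indented by 'tabs' tab stops. */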
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}

static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static bool cursor_active(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 state;

	if (IS_845G(dev) || IS_I865G(dev))
		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	return state;
}

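/*
 * CURPOS holds the cursor position in sign-magnitude form; decode both
 * coordinates and report whether the cursor plane is active.
 */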
static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pos;

	pos = I915_READ(CURPOS(pipe));

	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
		*x = -*x;

	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
		*y = -*y;

	return cursor_active(dev, pipe);
}

static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to the docs only one DRM_ROTATE_ value is allowed at a
	 * time, but print them all so any misuse of the field is visible.
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}

static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
			   plane_rotation(state->rotation));
	}
}

static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < SKL_NUM_SCALERS; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}

static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		struct intel_crtc_state *pipe_config;
		int x, y;

		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
				   x, y, crtc->base.cursor->state->crtc_w,
				   crtc->base.cursor->state->crtc_h,
				   crtc->cursor_addr, yesno(active));
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	int num_rings = INTEL_INFO(dev)->num_rings;
	enum intel_engine_id id;
	int j, ret;

	if (!i915.semaphores) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_engine_id(engine, dev_priv, id) {
			uint64_t offset;

			seq_printf(m, "%s\n", engine->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = id * I915_NUM_ENGINES + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = id + (j * I915_NUM_ENGINES);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_engine(engine, dev_priv)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(engine->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	seq_puts(m, "\nSync seqno:\n");
	for_each_engine(engine, dev_priv) {
		for (j = 0; j < num_rings; j++)
			seq_printf(m, "  0x%08x ",
				   engine->semaphore.sync_seqno[j]);
		seq_putc(m, '\n');
	}
	seq_putc(m, '\n');

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->config.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}

static void drrs_status_per_crtc(struct seq_file *m,
		struct drm_device *dev, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;

	drm_for_each_connector(connector, dev) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375
	}

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

3376
	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};

static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;

	drm_modeset_lock_all(dev);
	drm_for_each_connector(connector, dev) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_modeset_unlock_all(dev);
	return 0;
}

static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = to_i915(info->dev);
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = to_i915(info->dev);
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
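
/*
 * A sketch of one line as formatted by i915_pipe_crc_read() below
 * ("%8u %8x %8x %8x %8x %8x\n"): the frame counter followed by the five
 * CRC result words, e.g. (values illustrative only):
 *
 *       4919 28035f13 00000000 00000000 00000000 00000000
 */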

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
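
/*
 * CIRC_CNT() (from <linux/circ_buf.h>) evaluates to
 * (head - tail) & (size - 1) for a power-of-two sized ring, so e.g.
 * head == 5, tail == 2 means three entries are ready to be read out.
 */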

static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int n_entries;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	n_entries = count / PIPE_CRC_LINE_LEN;

	bytes_read = 0;
	while (n_entries > 0) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];

		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
			break;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		spin_unlock_irq(&pipe_crc->lock);

		if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
			return -EFAULT;

		user_buf += PIPE_CRC_LINE_LEN;
		n_entries--;

		spin_lock_irq(&pipe_crc->lock);
	}

	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}

static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};

static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}
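
/*
 * The three entries above surface as i915_pipe_A_crc, i915_pipe_B_crc and
 * i915_pipe_C_crc; with debugfs mounted in the usual place that is
 * /sys/kernel/debug/dri/<minor>/i915_pipe_<pipe>_crc (path assumed, it
 * depends on where debugfs is mounted).
 */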

static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
	return pipe_crc_sources[source];
}

static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}

static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		default:
			break;
		}
	}
	drm_modeset_unlock_all(dev);

	return ret;
}

static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_CHERRYVIEW(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		switch (pipe) {
		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
			break;
		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
			break;
		case PIPE_C:
			tmp |= PIPE_C_SCRAMBLE_RESET;
			break;
		default:
			return -EINVAL;
		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	switch (pipe) {
	case PIPE_A:
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
		break;
	case PIPE_B:
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
		break;
	case PIPE_C:
		tmp &= ~PIPE_C_SCRAMBLE_RESET;
		break;
	default:
		return;
	}
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}

static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}

static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	int ret = 0;

	drm_modeset_lock_all(dev);
	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
	pipe_config = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto out;
	}

	pipe_config->pch_pfit.force_thru = enable;
	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
	    pipe_config->pch_pfit.enabled != enable)
		pipe_config->base.connectors_changed = true;

	ret = drm_atomic_commit(state);
out:
	drm_modeset_unlock_all(dev);
	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
	if (ret)
		drm_atomic_state_free(state);
}

static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev, true);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
									pipe));
	enum intel_display_power_domain power_domain;
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);

	if (ret != 0)
		goto out;

	/* none -> real source transition */
	if (source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
		if (!entries) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(crtc);

		spin_lock_irq(&pipe_crc->lock);
		kfree(pipe_crc->entries);
		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->base.state->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev, false);

		hsw_enable_ips(crtc);
	}

	ret = 0;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
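
/*
 * A typical session from user space, assuming the default debugfs mount,
 * DRM minor 0, a pre-IVB "pipe" source, and that the control file is
 * registered under the name matching i915_display_crc_ctl_fops below:
 *
 *   # echo "pipe A pipe" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   # cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *   # echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */
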
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}

enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}

static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}

static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}

static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};

static ssize_t i915_displayport_test_active_write(struct file *file,
					    const char __user *ubuf,
					    size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct list_head *connector_list;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	connector_list = &dev->mode_config.connector_list;

	if (len == 0)
		return 0;

	input_buffer = kmalloc(len + 1, GFP_KERNEL);
	if (!input_buffer)
		return -ENOMEM;

	if (copy_from_user(input_buffer, ubuf, len)) {
		status = -EFAULT;
		goto out;
	}

	input_buffer[len] = '\0';
	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				goto out;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance_test_active = 1;
			else
				intel_dp->compliance_test_active = 0;
		}
	}
out:
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			if (intel_dp->compliance_test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
				       struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_active_show, dev);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%lx", intel_dp->compliance_test_data);
		} else
			seq_puts(m, "0");
	}

	return 0;
}

static int i915_displayport_test_data_open(struct inode *inode,
				       struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_data_show, dev);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
		} else
			seq_puts(m, "0");
	}

	return 0;
}

static int i915_displayport_test_type_open(struct inode *inode,
				       struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_type_show, dev);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_device *dev = m->private;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
		    IS_CHERRYVIEW(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (INTEL_INFO(dev)->gen < 5)
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
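
/*
 * A sketch of the expected input for the latency writers below, assuming
 * a platform with five watermark levels: writing "2 4 4 4 4" replaces all
 * five values at once; sscanf() must convert exactly num_levels fields or
 * the write fails with -EINVAL.
 */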


static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;

	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev_priv, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
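
/*
 * Example (path assumed, depends on the debugfs mount and DRM minor):
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 *
 * injects a hang via i915_handle_error() above, with the written value
 * used as the engine mask.
 */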

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	val &= INTEL_INFO(dev_priv)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
	dev_priv->gpu_error.test_irq_rings = val;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
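
/*
 * Example (path assumed): writing 0xf, i.e. DROP_ALL, via
 *
 *   # echo 0xf > /sys/kernel/debug/dri/0/i915_drop_caches
 *
 * waits for the GPU to idle, retires requests and shrinks both the bound
 * and unbound object lists, as implemented in i915_drop_caches_set() below.
 */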
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gem_wait_for_idle(dev_priv, true);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev_priv);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
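
/*
 * Valid values are 0..3, matching the GEN6_MBC_SNPCR_MASK field programmed
 * above; e.g. "echo 3 > /sys/kernel/debug/dri/0/i915_cache_sharing" (path
 * assumed) selects policy index 3.
 */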

struct sseu_dev_status {
	unsigned int slice_total;
	unsigned int subslice_total;
	unsigned int subslice_per_slice;
	unsigned int eu_total;
	unsigned int eu_per_subslice;
};

static void cherryview_sseu_device_status(struct drm_device *dev,
					  struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		stat->slice_total = 1;
		stat->subslice_per_slice++;
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		stat->eu_total += eu_cnt;
		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
	}
	stat->subslice_total = stat->subslice_per_slice;
}

static void gen9_sseu_device_status(struct drm_device *dev,
				    struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_BROXTON(dev)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		unsigned int ss_cnt = 0;

		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		stat->slice_total++;

		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_BROXTON(dev) &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			if (IS_BROXTON(dev))
				ss_cnt++;

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			stat->eu_total += eu_cnt;
			stat->eu_per_subslice = max(stat->eu_per_subslice,
						    eu_cnt);
		}

		stat->subslice_total += ss_cnt;
		stat->subslice_per_slice = max(stat->subslice_per_slice,
					       ss_cnt);
	}
}

static void broadwell_sseu_device_status(struct drm_device *dev,
					 struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int s;
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);

	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);

	if (stat->slice_total) {
		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
		stat->subslice_total = stat->slice_total *
				       stat->subslice_per_slice;
		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < stat->slice_total; s++) {
			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];

			stat->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
	struct drm_device *dev = &dev_priv->drm;
	struct sseu_dev_status stat;

	if (INTEL_INFO(dev)->gen < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	seq_printf(m, "  Available Slice Total: %u\n",
		   INTEL_INFO(dev)->slice_total);
	seq_printf(m, "  Available Subslice Total: %u\n",
		   INTEL_INFO(dev)->subslice_total);
	seq_printf(m, "  Available Subslice Per Slice: %u\n",
		   INTEL_INFO(dev)->subslice_per_slice);
	seq_printf(m, "  Available EU Total: %u\n",
		   INTEL_INFO(dev)->eu_total);
	seq_printf(m, "  Available EU Per Subslice: %u\n",
		   INTEL_INFO(dev)->eu_per_subslice);
	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
	if (HAS_POOLED_EU(dev))
		seq_printf(m, "  Min EU in pool: %u\n",
			   INTEL_INFO(dev)->min_eu_in_pool);
	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_eu_pg));

	seq_puts(m, "SSEU Device Status\n");
	memset(&stat, 0, sizeof(stat));

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_sseu_device_status(dev, &stat);
	} else if (IS_BROADWELL(dev)) {
		broadwell_sseu_device_status(dev, &stat);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_sseu_device_status(dev, &stat);
	}

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "  Enabled Slice Total: %u\n",
		   stat.slice_total);
	seq_printf(m, "  Enabled Subslice Total: %u\n",
		   stat.subslice_total);
	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
		   stat.subslice_per_slice);
	seq_printf(m, "  Enabled EU Total: %u\n",
		   stat.eu_total);
	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
		   stat.eu_per_subslice);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
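
/*
 * Keeping i915_forcewake_user open (assuming the usual debugfs path,
 * /sys/kernel/debug/dri/<minor>/i915_forcewake_user) holds FORCEWAKE_ALL
 * plus a runtime-pm reference, keeping the GT awake for register access;
 * both are dropped again on release.
 */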

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
B
Ben Widawsky 已提交
5294
				  S_IRUSR,
5295 5296
				  root, dev,
				  &i915_forcewake_fops);
5297 5298
	if (!ent)
		return -ENOMEM;
5299

B
Ben Widawsky 已提交
5300
	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
5301 5302
}

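/*
 * Generalisation of i915_forcewake_create(): create a debugfs file for
 * the given fops and wrap it in a fake info node, keyed by the fops
 * pointer, so that i915_debugfs_unregister() can remove it again via
 * drm_debugfs_remove_files().
 */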
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

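/*
 * Unlike the read-only i915_debugfs_list entries above, these files
 * carry their own fops (usually both read and write) and are created
 * with mode S_IRUGO | S_IWUSR by i915_debugfs_create().
 */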
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops}
};

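/*
 * One-time initialisation of the per-pipe CRC state (open flag, lock
 * and wait queue) so the i915_pipe_crc_* files created at registration
 * time find it ready for use.
 */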
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
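/*
 * A block dumps the inclusive range [.offset, .end] when .end is set,
 * otherwise .size bytes from .offset, otherwise a single byte:
 * { .offset = DP_SET_POWER } above reads exactly one byte.  See the
 * size computation at the top of the loop in i915_dpcd_show().
 */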

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		/* the aux read may be short: dump only what was returned */
		seq_printf(m, "%04x: %*ph\n", b->offset, (int) err, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
				    &i915_dpcd_fops);

	return 0;
}
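
/*
 * A minimal usage sketch (hypothetical caller, not taken from this
 * file): a connector registration path would add the i915 files right
 * after registering the connector, treating failure as non-fatal since
 * only debug output is lost:
 *
 *	drm_connector_register(connector);
 *	if (i915_debugfs_connector_add(connector))
 *		DRM_DEBUG_KMS("no i915 debugfs files for connector\n");
 */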