/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

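/*
 * A fake node is typically paired with a small registration helper, roughly
 * along these lines (a sketch; the actual helper lives further down in the
 * full file and its exact mode flags may differ):
 *
 *	ent = debugfs_create_file(name, S_IRUGO, root, dev, fops);
 *	if (!ent)
 *		return -ENOMEM;
 *	return drm_add_fake_info_node(minor, ent, fops);
 */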
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

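/*
 * The single-character flags returned below feed describe_obj()'s summary
 * line: '*' for active, 'p' for pinned to the display plane, 'X'/'Y' for
 * the tiling mode, 'g' for a global-GTT binding and 'M' when a kernel
 * mapping of the object's pages exists.
 */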
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return obj->active ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mapping ? 'M' : ' ';
}

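/* Sum the sizes of all GGTT VMA nodes actually allocated for this object. */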
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

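/*
 * Emit a one-line summary of a GEM object: pointer, status flags, size,
 * read/write domains and per-engine last-read seqnos, followed by optional
 * "(...)" annotations (name, pin count, fence register, per-VMA offsets,
 * stolen offset, mappability and frontbuffer bits).
 */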
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;
	enum intel_engine_id id;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain);
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "%x ",
				i915_gem_request_get_seqno(obj->last_read_req[id]));
	seq_printf(m, "] %x %x%s%s%s",
		   i915_gem_request_get_seqno(obj->last_write_req),
		   i915_gem_request_get_seqno(obj->last_fenced_req),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->pin_count > 0)
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   vma->is_ggtt ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (vma->is_ggtt)
			seq_printf(m, ", type: %u", vma->ggtt_view.type);
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_display || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_display)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->last_write_req != NULL)
		seq_printf(m, " (%s)",
			   i915_gem_request_get_engine(obj->last_write_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}

static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &ggtt->base.active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &ggtt->base.inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, vm_link) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

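/*
 * list_sort() comparator: order objects by the start offset of their
 * stolen-memory allocation (returns negative/zero/positive, strcmp-style).
 */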
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

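/*
 * count_objects() (and count_vmas() further down) are macros rather than
 * functions so that they can accumulate into the caller's local size,
 * count, mappable_size and mappable_count variables, which they reference
 * by name.
 */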
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_total_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (vma->is_ggtt) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}

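/*
 * per_file_stats() is an idr iterator callback: i915_gem_object_info()
 * below walks each client's handle table with
 * idr_for_each(&file->object_idr, per_file_stats, &stats).
 */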
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_total_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mappable_count, purgeable_count;
	u64 size, mappable_size, purgeable_size;
	unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
	u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&ggtt->base.active_list, vm_link);
	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&ggtt->base.inactive_list, vm_link);
	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
		if (obj->mapping) {
			pin_mapped_count++;
			pin_mapped_size += obj->base.size;
			if (obj->pages_pin_count == 0) {
				pin_mapped_purgeable_count++;
				pin_mapped_purgeable_size += obj->base.size;
			}
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_display) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
		if (obj->mapping) {
			pin_mapped_count++;
			pin_mapped_size += obj->base.size;
			if (obj->pages_pin_count == 0) {
				pin_mapped_purgeable_count++;
				pin_mapped_purgeable_size += obj->base.size;
			}
		}
	}
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
		   count, size);
	seq_printf(m,
		   "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
		   pin_mapped_count, pin_mapped_purgeable_count,
		   pin_mapped_size, pin_mapped_purgeable_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);

	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

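/*
 * Describe a single queued page flip: whether it is still being prepared
 * or already waits for vblank, and which request (if any) each plane in
 * the flip is still blocked on.
 */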
static void i915_dump_pageflip(struct seq_file *m,
			       struct drm_i915_private *dev_priv,
			       struct intel_crtc *crtc,
			       struct intel_flip_work *work)
{
	const char pipe = pipe_name(crtc->pipe);
	u32 pending;
	int i;

	pending = atomic_read(&work->pending);
	if (pending) {
		seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
			   pipe, plane_name(crtc->plane));
	} else {
		seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
			   pipe, plane_name(crtc->plane));
	}

	for (i = 0; i < work->num_planes; i++) {
		struct intel_plane_state *old_plane_state = work->old_plane_state[i];
		struct drm_plane *plane = old_plane_state->base.plane;
		struct drm_i915_gem_request *req = old_plane_state->wait_req;
		struct intel_engine_cs *engine;

		seq_printf(m, "[PLANE:%i] part of flip.\n", plane->base.id);

		if (!req) {
			seq_printf(m, "Plane not associated with any engine\n");
			continue;
		}

		engine = i915_gem_request_get_engine(req);

		seq_printf(m, "Plane blocked on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
			   engine->name,
			   i915_gem_request_get_seqno(req),
			   dev_priv->next_seqno,
			   engine->get_seqno(engine),
			   i915_gem_request_completed(req, true));
	}

	seq_printf(m, "Flip queued on frame %d, now %d\n",
		   pending ? work->flip_queued_vblank : -1,
		   intel_crtc_get_vblank_counter(crtc));
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		if (list_empty(&crtc->flip_work)) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			list_for_each_entry(work, &crtc->flip_work, head) {
				i915_dump_pageflip(m, dev_priv, crtc, work);
				seq_puts(m, "\n");
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *req;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv) {
		int count;

		count = 0;
		list_for_each_entry(req, &engine->request_list, list)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->request_list, list) {
			struct task_struct *task;

			rcu_read_lock();
			task = NULL;
			if (req->pid)
				task = pid_task(req->pid, PIDTYPE_PID);
			seq_printf(m, "    %x @ %d: %s [%d]\n",
				   req->seqno,
				   (int) (jiffies - req->emitted_jiffies),
				   task ? task->comm : "<unknown>",
				   task ? task->pid : -1);
			rcu_read_unlock();
		}

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, engine->get_seqno(engine));
	seq_printf(m, "Current user interrupts (%s): %x\n",
		   engine->name, READ_ONCE(engine->user_interrupts));
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv)
		i915_ring_seqno_info(m, engine);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}


static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	const u32 *hws;
	int i;

	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
	hws = engine->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

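/*
 * <debugfs>/dri/<minor>/i915_error_state: reading dumps the most recently
 * captured GPU error state, while writing anything to the file clears it.
 */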
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

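/*
 * i915_next_seqno is wired up via DEFINE_SIMPLE_ATTRIBUTE() below, so the
 * u64 value is printed and parsed with the "0x%llx\n" format: reads report
 * the next request seqno, writes rebase it through i915_gem_set_seqno().
 */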
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

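/*
 * Report the GPU frequency (RPS) state. The registers that hold this
 * information differ per platform, hence the IS_GEN5 / VLV-CHV / gen >= 6
 * ladder below.
 */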
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_BROXTON(dev)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (IS_GEN9(dev))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	u32 instdone[I915_NUM_INSTDONE_REG];
	enum intel_engine_id id;
	int j;

	if (!i915.enable_hangcheck) {
		seq_printf(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine_id(engine, dev_priv, id) {
		acthd[id] = intel_ring_get_active_head(engine);
		seqno[id] = engine->get_seqno(engine);
	}

	i915_get_extra_instdone(dev_priv, instdone);

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_engine_id(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno,
			   seqno[id],
			   engine->last_submitted_seqno);
		seq_printf(m, "\tuser interrupts = %x [current %x]\n",
			   engine->hangcheck.user_interrupts,
			   READ_ONCE(engine->user_interrupts));
		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x", instdone[j]);

			seq_puts(m, "\n\tinstdone accu =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x",
					   engine->hangcheck.instdone[j]);

			seq_puts(m, "\n");
		}
	}

	return 0;
}

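/*
 * DRPC (render C-state) reporting. i915_drpc_info() further down selects
 * the ironlake, vlv or gen6 variant to match the platform.
 */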
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_uncore_forcewake_domain *fw_domain;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return vlv_drpc_info(m);
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (INTEL_INFO(dev_priv)->gen >= 7)
		seq_printf(m, "Compressing: %s\n",
			   yesno(I915_READ(FBC_STATUS2) &
				 FBC_COMPRESSION_MASK));

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

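/*
 * FBC "false color" is a debug aid: while FBC_CTL_FALSE_COLOR is set the
 * hardware scans out successfully compressed lines in a solid color, so
 * compression coverage can be checked visually on the panel.
 */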
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
		 IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_CORE_RING_FREQ(dev)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq =
			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq =
			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
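		/*
		 * The pcode mailbox hands back the pairing for this GPU
		 * frequency: bits 7:0 are the effective CPU frequency and
		 * bits 15:8 the effective ring frequency, in 100 MHz units.
		 */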
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
				(IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
				 GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (to_i915(dev)->fbdev) {
		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.depth,
			   fbdev_fb->base.bits_per_pixel,
			   fbdev_fb->base.modifier[0],
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   fb->base.modifier[0],
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void describe_ctx_ringbuf(struct seq_file *m,
				 struct intel_ringbuffer *ringbuf)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ringbuf->space, ringbuf->head, ringbuf->tail,
		   ringbuf->last_retired_head);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	struct intel_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		if (!i915.enable_execlists &&
		    ctx->legacy_hw_ctx.rcs_state == NULL)
			continue;

		seq_printf(m, "HW context %u ", ctx->hw_id);
		describe_ctx(m, ctx);
		if (ctx == dev_priv->kernel_context)
			seq_puts(m, "(kernel context) ");

		if (i915.enable_execlists) {
			seq_putc(m, '\n');
			for_each_engine_id(engine, dev_priv, id) {
				struct drm_i915_gem_object *ctx_obj =
					ctx->engine[id].state;
				struct intel_ringbuffer *ringbuf =
					ctx->engine[id].ringbuf;

				seq_printf(m, "%s: ", engine->name);
				if (ctx_obj)
					describe_obj(m, ctx_obj);
				if (ringbuf)
					describe_ctx_ringbuf(m, ringbuf);
				seq_putc(m, '\n');
			}
		} else {
			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void i915_dump_lrc_obj(struct seq_file *m,
			      struct intel_context *ctx,
			      struct intel_engine_cs *engine)
{
	struct page *page;
	uint32_t *reg_state;
	int j;
	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
	unsigned long ggtt_offset = 0;

	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);

	if (ctx_obj == NULL) {
		seq_puts(m, "\tNot allocated\n");
		return;
	}

	if (!i915_gem_obj_ggtt_bound(ctx_obj))
		seq_puts(m, "\tNot bound in GGTT\n");
	else
		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);

	if (i915_gem_object_get_pages(ctx_obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n");
		return;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	if (!WARN_ON(page == NULL)) {
		reg_state = kmap_atomic(page);

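		/*
		 * Dump the start of the register state, which sits
		 * LRC_STATE_PN pages into the context image, printing
		 * four dwords per line.
		 */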
		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   ggtt_offset + 4096 + (j * 4),
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	seq_putc(m, '\n');
}

static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	struct intel_context *ctx;
	int ret;

	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link)
		for_each_engine(engine, dev_priv)
			i915_dump_lrc_obj(m, ctx, engine);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_execlists(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status;
	u32 ctx_id;
	struct list_head *cursor;
	int i, ret;

	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv) {
		struct drm_i915_gem_request *head_req = NULL;
		int count = 0;

		seq_printf(m, "%s\n", engine->name);

		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
			   status, ctx_id);

		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);

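		/*
		 * The CSB write pointer wraps modulo GEN8_CSB_ENTRIES; bias
		 * it past the read pointer so the printed pair shows how far
		 * ahead the hardware is.
		 */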
		read_pointer = engine->next_context_status_buffer;
		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
		if (read_pointer > write_pointer)
			write_pointer += GEN8_CSB_ENTRIES;
		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
			   read_pointer, write_pointer);

		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));

			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
				   i, status, ctx_id);
		}

		spin_lock_bh(&engine->execlist_lock);
		list_for_each(cursor, &engine->execlist_queue)
			count++;
		head_req = list_first_entry_or_null(&engine->execlist_queue,
						    struct drm_i915_gem_request,
						    execlist_link);
		spin_unlock_bh(&engine->execlist_lock);

		seq_printf(m, "\t%d requests in queue\n", count);
		if (head_req) {
			seq_printf(m, "\tHead request context: %u\n",
				   head_req->ctx->hw_id);
			seq_printf(m, "\tHead request tail: %u\n",
				   head_req->tail);
		}

		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_INFO(dev)->gen >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int per_file_ctx(int id, void *ptr, void *data)
{
	struct intel_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}
	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}

static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv) {
		seq_printf(m, "%s\n", engine->name);
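		/*
		 * Each engine exposes four page-directory pointers; each
		 * 64-bit PDP is split across a UDW/LDW register pair.
		 */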
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}

static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_file *file;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_put;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}
	mutex_unlock(&dev->filelist_mutex);

out_put:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	int count = 0;

	for_each_engine(engine, i915)
		count += engine->irq_refcount;

	return count;
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

	mutex_lock(&dev->filelist_mutex);
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Semaphore boosts: %d%s\n",
		   dev_priv->rps.semaphores.boosts,
		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
	seq_printf(m, "MMIO flip boosts: %d%s\n",
		   dev_priv->rps.mmioflips.boosts,
		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	u32 tmp, i;

	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		guc_fw->guc_fw_path);
	seq_printf(m, "\tfetch: %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
	seq_printf(m, "\tload: %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		guc_fw->rsa_offset, guc_fw->rsa_size);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	return 0;
}

static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *engine;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
		client->priority, client->ctx_index, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
		client->doorbell_id, client->doorbell_offset, client->cookie);
	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
		client->wq_size, client->wq_offset, client->wq_tail);

	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
	seq_printf(m, "\tLast submission result: %d\n", client->retcode);

	for_each_engine(engine, dev_priv) {
		seq_printf(m, "\tSubmissions: %llu %s\n",
				client->submissions[engine->guc_id],
				engine->name);
		tot += client->submissions[engine->guc_id];
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc guc;
	struct i915_guc_client client = {};
	struct intel_engine_cs *engine;
	u64 total = 0;

	if (!HAS_GUC_SCHED(dev_priv))
		return 0;

	if (mutex_lock_interruptible(&dev->struct_mutex))
		return 0;

	/* Take a local copy of the GuC data, so we can dump it at leisure */
	guc = dev_priv->guc;
	if (guc.execbuf_client)
		client = *guc.execbuf_client;

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);

	seq_printf(m, "\nGuC submissions:\n");
	for_each_engine(engine, dev_priv) {
		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
			engine->name, guc.submissions[engine->guc_id],
			guc.last_seqno[engine->guc_id]);
		total += guc.submissions[engine->guc_id];
	}
	seq_printf(m, "\t%s: %llu\n", "Total", total);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
	i915_guc_client_info(m, dev_priv, &client);

	/* Add more as required ... */

	return 0;
}

static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
	u32 *log;
	int i = 0, pg;

	if (!log_obj)
		return 0;

	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(log + i), *(log + i + 1),
				   *(log + i + 2), *(log + i + 3));

		kunmap_atomic(log);
	}

	seq_putc(m, '\n');

	return 0;
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev))
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	else {
		for_each_pipe(dev_priv, pipe) {
			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter.
	 * SKL+ Perf counter is reset to 0 every time DC state is entered.
	 */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	for_each_intel_connector(dev, connector) {
		if (connector->base.dpms != DRM_MODE_DPMS_ON)
			continue;

		if (!connector->base.encoder)
			continue;

		encoder = to_intel_encoder(connector->base.encoder);
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}

static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

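	/*
	 * MSR_RAPL_POWER_UNIT bits 12:8 encode the energy status unit as
	 * 2^-ESU joules; precompute microjoules per counter tick before
	 * scaling the MCH energy counter.
	 */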
	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (unsigned long long)power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev->dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(dev_priv->dev->pdev->current_state),
		   dev_priv->dev->pdev->current_state);

	return 0;
}

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				 intel_display_power_domain_str(power_domain),
				 power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}

static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_csr *csr;

	if (!HAS_CSR(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_encoder->type == INTEL_OUTPUT_EDP)
		intel_panel_info(m, &intel_connector->panel);
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}

static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}
	if (intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			intel_dp_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
			intel_hdmi_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static bool cursor_active(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 state;

	if (IS_845G(dev) || IS_I865G(dev))
		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	return state;
}

static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pos;

	pos = I915_READ(CURPOS(pipe));
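	/*
	 * CURPOS packs x/y as sign-magnitude: a magnitude field plus a
	 * separate sign bit per coordinate, hence the explicit negation
	 * below.
	 */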

	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
		*x = -*x;

	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
		*y = -*y;

	return cursor_active(dev, pipe);
}

static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
		 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
		 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
		 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
		 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
		 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
		 rotation);

	return buf;
}

static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

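		/*
		 * src_* are 16.16 fixed point; the fractional part is shown
		 * in decimal micro-units: x * 15625 >> 10 == x * 10^6 / 2^16.
		 */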
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
			   plane_rotation(state->rotation));
	}
}

static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < SKL_NUM_SCALERS; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}

static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		struct intel_crtc_state *pipe_config;
		int x, y;

		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
				   x, y, crtc->base.cursor->state->crtc_w,
				   crtc->base.cursor->state->crtc_h,
				   crtc->cursor_addr, yesno(active));
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	enum intel_engine_id id;
	int j, ret;

	if (!i915_semaphore_is_enabled(dev_priv)) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
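		/*
		 * The BDW semaphore page holds a matrix of 64-bit seqnos,
		 * indexed by signalling engine and waiting engine, so the
		 * signal and wait tables below are transposes of each other.
		 */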
		for_each_engine_id(engine, dev_priv, id) {
			uint64_t offset;

			seq_printf(m, "%s\n", engine->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = id * I915_NUM_ENGINES + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = id + (j * I915_NUM_ENGINES);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_engine(engine, dev_priv)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(engine->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	seq_puts(m, "\nSync seqno:\n");
	for_each_engine(engine, dev_priv) {
		for (j = 0; j < num_rings; j++)
			seq_printf(m, "  0x%08x ",
				   engine->semaphore.sync_seqno[j]);
		seq_putc(m, '\n');
	}
	seq_putc(m, '\n');

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->config.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
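	/*
	 * Re-read each workaround register and check that the masked value
	 * still matches what was programmed at init time.
	 */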
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}

static void drrs_status_per_crtc(struct seq_file *m,
		struct drm_device *dev, struct intel_crtc *intel_crtc)
{
	struct intel_encoder *intel_encoder;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;

	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
		/* Encoder connected on this CRTC */
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_EDP:
			seq_puts(m, "eDP:\n");
			break;
		case INTEL_OUTPUT_DSI:
			seq_puts(m, "DSI:\n");
			break;
		case INTEL_OUTPUT_HDMI:
			seq_puts(m, "HDMI:\n");
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			seq_puts(m, "DP:\n");
			break;
		default:
			seq_printf(m, "Other encoder (id=%d).\n",
						intel_encoder->type);
			return;
		}
	}

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		drm_modeset_lock(&intel_crtc->base.mutex, NULL);

		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);
	}

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};

static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_encoder *encoder;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		intel_encoder = to_intel_encoder(encoder);
		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
			continue;
		intel_dig_port = enc_to_dig_port(encoder);
		if (!intel_dig_port->dp.can_mst)
			continue;
		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_modeset_unlock_all(dev);
	return 0;
}

static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for the trailing '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}

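/*
 * Read formatted CRC entries out of the per-pipe circular buffer. The
 * read blocks (unless O_NONBLOCK is set) until at least one entry is
 * available, and each entry is emitted as one fixed-width text line.
 */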
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int n_entries;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	n_entries = count / PIPE_CRC_LINE_LEN;

	bytes_read = 0;
	while (n_entries > 0) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];
		int ret;

		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
			break;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		spin_unlock_irq(&pipe_crc->lock);

		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		user_buf += PIPE_CRC_LINE_LEN;
		n_entries--;

		spin_lock_irq(&pipe_crc->lock);
	}

	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}

static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};

static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}

static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
	return pipe_crc_sources[source];
}

static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}

static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		default:
			break;
		}
	}
	drm_modeset_unlock_all(dev);

	return ret;
}

static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_CHERRYVIEW(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		switch (pipe) {
		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
			break;
		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
			break;
			break;
		case PIPE_C:
			tmp |= PIPE_C_SCRAMBLE_RESET;
			break;
		default:
			return -EINVAL;
		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	switch (pipe) {
	case PIPE_A:
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
		break;
	case PIPE_B:
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
		break;
		break;
	case PIPE_C:
		tmp &= ~PIPE_C_SCRAMBLE_RESET;
		break;
	default:
		return;
	}
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);

}

static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}

static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	int ret = 0;

	drm_modeset_lock_all(dev);
	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
	pipe_config = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto out;
	}

	pipe_config->pch_pfit.force_thru = enable;
	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
	    pipe_config->pch_pfit.enabled != enable)
		pipe_config->base.connectors_changed = true;

	ret = drm_atomic_commit(state);
out:
	drm_modeset_unlock_all(dev);
	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
	if (ret)
		drm_atomic_state_free(state);
}

static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev, true);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

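/*
 * Switch the CRC source for a pipe. Only none -> source and
 * source -> none transitions are allowed; on enable a fresh entry
 * buffer is allocated (and IPS disabled, since IPS perturbs the CRC),
 * on disable the buffer is torn down and any scrambling/workaround
 * state is undone.
 */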
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
									pipe));
	enum intel_display_power_domain power_domain;
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);

	if (ret != 0)
		goto out;

	/* none -> real source transition */
	if (source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
		if (!entries) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(crtc);

		spin_lock_irq(&pipe_crc->lock);
		kfree(pipe_crc->entries);
		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->base.state->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev, false);

		hsw_enable_ips(crtc);
	}

	ret = 0;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * e.g.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
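
/*
 * Illustrative usage from user space (the debugfs mount point and card
 * index may differ on a given system):
 *
 *   echo "pipe A pipe" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *   echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */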
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}

enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}

static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}

static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}

static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};

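/*
 * The i915_displayport_test_* files below expose the DP compliance
 * testing state (active flag, test data and test type) for each
 * connected DisplayPort connector, so an external compliance harness
 * can drive and observe the tests.
 */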
static ssize_t i915_displayport_test_active_write(struct file *file,
					    const char __user *ubuf,
					    size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct list_head *connector_list;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	connector_list = &dev->mode_config.connector_list;

	if (len == 0)
		return 0;

	input_buffer = kmalloc(len + 1, GFP_KERNEL);
	if (!input_buffer)
		return -ENOMEM;

	if (copy_from_user(input_buffer, ubuf, len)) {
		status = -EFAULT;
		goto out;
	}

	input_buffer[len] = '\0';
	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				goto out;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance_test_active = 1;
			else
				intel_dp->compliance_test_active = 0;
		}
	}
out:
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			if (intel_dp->compliance_test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
				       struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_active_show, dev);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%lx", intel_dp->compliance_test_data);
		} else
			seq_puts(m, "0");
	}

	return 0;
}
static int i915_displayport_test_data_open(struct inode *inode,
				       struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_data_show, dev);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
		} else
			seq_puts(m, "0");
	}

	return 0;
}

static int i915_displayport_test_type_open(struct inode *inode,
				       struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_type_show, dev);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

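/*
 * Watermark latencies are stored in 0.5us units for WM1+ on ILK-style
 * platforms but in 1us units on gen9/vlv/chv; wm_latency_show() scales
 * both into tenths of a microsecond before printing.
 */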
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_device *dev = m->private;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
		    IS_CHERRYVIEW(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (INTEL_INFO(dev)->gen < 5)
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}


static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;

	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev_priv, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

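/*
 * i915_drop_caches accepts a mask of the DROP_* flags below; writing
 * DROP_ALL (for example "echo 0xf > i915_drop_caches" from debugfs)
 * idles the GPU, retires requests and shrinks bound and unbound objects.
 */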
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev_priv);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

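/*
 * The max/min freq files adjust the RPS software limits. Input values
 * (in MHz) are translated with intel_freq_opcode() and rejected if they
 * fall outside the hardware limits or would cross the opposite softlimit.
 */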
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

struct sseu_dev_status {
	unsigned int slice_total;
	unsigned int subslice_total;
	unsigned int subslice_per_slice;
	unsigned int eu_total;
	unsigned int eu_per_subslice;
};

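/*
 * The *_sseu_device_status() helpers below read the platform-specific
 * power gating acknowledgment registers (or, on Broadwell, the slice
 * info register plus the fused topology) to fill in how many slices,
 * subslices and EUs are currently powered up.
 */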
static void cherryview_sseu_device_status(struct drm_device *dev,
					  struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		stat->slice_total = 1;
		stat->subslice_per_slice++;
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		stat->eu_total += eu_cnt;
		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
	}
	stat->subslice_total = stat->subslice_per_slice;
}

static void gen9_sseu_device_status(struct drm_device *dev,
				    struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_BROXTON(dev)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		unsigned int ss_cnt = 0;

		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		stat->slice_total++;

		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_BROXTON(dev) &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			if (IS_BROXTON(dev))
				ss_cnt++;

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			stat->eu_total += eu_cnt;
			stat->eu_per_subslice = max(stat->eu_per_subslice,
						    eu_cnt);
		}

		stat->subslice_total += ss_cnt;
		stat->subslice_per_slice = max(stat->subslice_per_slice,
					       ss_cnt);
	}
}

static void broadwell_sseu_device_status(struct drm_device *dev,
					 struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int s;
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);

	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);

	if (stat->slice_total) {
		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
		stat->subslice_total = stat->slice_total *
				       stat->subslice_per_slice;
		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < stat->slice_total; s++) {
			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];

			stat->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct sseu_dev_status stat;

	if (INTEL_INFO(dev)->gen < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	seq_printf(m, "  Available Slice Total: %u\n",
		   INTEL_INFO(dev)->slice_total);
	seq_printf(m, "  Available Subslice Total: %u\n",
		   INTEL_INFO(dev)->subslice_total);
	seq_printf(m, "  Available Subslice Per Slice: %u\n",
		   INTEL_INFO(dev)->subslice_per_slice);
	seq_printf(m, "  Available EU Total: %u\n",
		   INTEL_INFO(dev)->eu_total);
	seq_printf(m, "  Available EU Per Subslice: %u\n",
		   INTEL_INFO(dev)->eu_per_subslice);
	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_eu_pg));

	seq_puts(m, "SSEU Device Status\n");
	memset(&stat, 0, sizeof(stat));
	if (IS_CHERRYVIEW(dev)) {
		cherryview_sseu_device_status(dev, &stat);
	} else if (IS_BROADWELL(dev)) {
		broadwell_sseu_device_status(dev, &stat);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_sseu_device_status(dev, &stat);
	}
5302 5303 5304 5305 5306 5307 5308 5309 5310 5311
	seq_printf(m, "  Enabled Slice Total: %u\n",
		   stat.slice_total);
	seq_printf(m, "  Enabled Subslice Total: %u\n",
		   stat.subslice_total);
	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
		   stat.subslice_per_slice);
	seq_printf(m, "  Enabled EU Total: %u\n",
		   stat.eu_total);
	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
		   stat.eu_per_subslice);
5312

5313 5314 5315
	return 0;
}

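/*
 * Userspace holds a forcewake (and runtime-pm) reference for as long as it
 * keeps this file open, preventing the GT from powering down, e.g. while
 * registers are inspected from a debugger.  A minimal sketch from a shell,
 * assuming DRM minor 0:
 *
 *	exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *	# ... poke at registers ...
 *	exec 3<&-
 */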
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

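/*
 * Note the file is created root-read-only (S_IRUSR), presumably because
 * holding it open carries a real power cost.
 */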
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

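/*
 * Helper for the control files below: creates the dentry and registers it
 * as a fake info node so that i915_debugfs_cleanup() can find and remove
 * it again through drm_debugfs_remove_files().
 */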
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}

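/*
 * Read-only seq_file entries, registered in bulk through
 * drm_debugfs_create_files().  The optional trailing field carries
 * per-file data (e.g. PINNED_LIST) that the show callback can retrieve.
 */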
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops}
};

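/*
 * Called during driver load to initialise the per-pipe CRC state (open
 * flag, lock and wait queue) before any of the CRC capture files are used.
 */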
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

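/*
 * Register everything above with debugfs: the forcewake file first, then
 * the per-pipe CRC files, the writable control files and finally the
 * read-only info list.
 */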
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

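/*
 * Mirror image of i915_debugfs_init(): every file registered there is
 * removed again, via the fake info nodes recorded at creation time.
 */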
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

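/*
 * Each entry below names one contiguous DPCD range to dump.  The byte
 * count resolves to end - offset + 1 when .end is set, else .size, else a
 * single byte; e.g. { .offset = DP_SET_POWER } dumps exactly one byte.
 */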
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

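		/* %*ph prints the buffer as space-separated hex bytes */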
		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
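/*
 * The i915_dpcd file created below ends up in the connector's debugfs
 * directory; the exact path and connector name vary by system, e.g.:
 *
 *	cat /sys/kernel/debug/dri/0/DP-1/i915_dpcd
 */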

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
				    &i915_dpcd_fops);

	return 0;
}