i915_sysfs.c 19.1 KB
Newer Older
B
Ben Widawsky 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
32
#include "intel_drv.h"
B
Ben Widawsky 已提交
33 34
#include "i915_drv.h"

35
#define dev_to_drm_minor(d) dev_get_drvdata((d))
36

37
#ifdef CONFIG_PM
38 39
/*
 * calc_residency - read an RC6 residency counter and convert to milliseconds.
 * @dev: drm device
 * @reg: residency counter register to sample
 *
 * Returns 0 when RC6 is disabled. Counter units differ per platform:
 * 1.28us on most gens, CZ clock ticks on VLV/CHV, 833.33ns on BXT.
 */
static u32 calc_residency(struct drm_device *dev,
			  i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u64 time; /* 64b: the 32b counter may overflow during fixed point math */
	u64 scale = 128ULL, divisor = 100000ULL;
	u32 residency;

	if (!intel_enable_rc6())
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		scale = 1;
		divisor = dev_priv->czclk_freq;

		/* High-range mode counts in units of 256 CZ ticks */
		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			scale <<= 8;
	} else if (IS_BROXTON(dev)) {
		scale = 1;
		divisor = 1200;		/* 833.33ns */
	}

	time = I915_READ(reg) * scale;
	residency = DIV_ROUND_UP_ULL(time, divisor);

	intel_runtime_pm_put(dev_priv);

	return residency;
}

static ssize_t
B
Ben Widawsky 已提交
71
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
B
Ben Widawsky 已提交
72
{
73
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
B
Ben Widawsky 已提交
74 75 76
}

static ssize_t
B
Ben Widawsky 已提交
77
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
B
Ben Widawsky 已提交
78
{
79
	struct drm_minor *dminor = dev_get_drvdata(kdev);
B
Ben Widawsky 已提交
80
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
81
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
B
Ben Widawsky 已提交
82 83 84
}

static ssize_t
B
Ben Widawsky 已提交
85
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
B
Ben Widawsky 已提交
86
{
87
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
B
Ben Widawsky 已提交
88
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
89
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
B
Ben Widawsky 已提交
90 91 92
}

static ssize_t
B
Ben Widawsky 已提交
93
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
B
Ben Widawsky 已提交
94
{
95
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
B
Ben Widawsky 已提交
96
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
97
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
B
Ben Widawsky 已提交
98 99
}

100 101 102 103 104 105 106 107
/* sysfs read: media-well RC6 residency in milliseconds (VLV/CHV only). */
static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	/* Use dev_to_drm_minor() like the other residency handlers,
	 * instead of open-coding dev_get_drvdata(). */
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);

	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

B
Ben Widawsky 已提交
108 109 110 111
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
112
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
B
Ben Widawsky 已提交
113 114 115 116 117 118 119 120 121 122 123

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs =  rc6_attrs
};
124 125 126 127 128 129 130 131 132 133 134

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs =  rc6p_attrs
};
135 136 137 138 139 140 141 142 143 144

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs =  media_rc6_attrs
};
145
#endif
B
Ben Widawsky 已提交
146

147 148
/*
 * l3_access_valid - validate an l3_parity sysfs access at @offset.
 *
 * Returns 0 if the access is acceptable, otherwise a negative errno:
 * -EPERM when the device has no L3 DPF support, -EINVAL for unaligned
 * offsets, -ENXIO for offsets beyond the remap table.
 */
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	/* The remap table is accessed in whole dwords only */
	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

/*
 * i915_l3_read - sysfs binary read of the per-slice L3 parity remap table.
 *
 * Returns the number of bytes copied into @buf, or a negative errno.
 * Reads of a slice with no remapping programmed yet return zeroes.
 */
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *ddev = minor->dev;
	struct drm_i915_private *i915 = to_i915(ddev);
	int slice = (int)(uintptr_t)attr->private;
	int err;

	/* Only whole dwords are meaningful; drop any trailing partial one. */
	count = round_down(count, 4);

	err = l3_access_valid(ddev, offset);
	if (err)
		return err;

	/* Clamp the read to the end of the remap table. */
	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	err = i915_mutex_lock_interruptible(ddev);
	if (err)
		return err;

	if (i915->l3_parity.remap_info[slice])
		memcpy(buf,
		       i915->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);	/* nothing programmed yet */

	mutex_unlock(&ddev->struct_mutex);

	return count;
}

/*
 * i915_l3_write - sysfs binary write of the per-slice L3 parity remap table.
 *
 * Allocates the slice's remap table on first use, copies the new data in,
 * and marks every context so the remap is (re)applied on its next switch.
 * Returns @count on success or a negative errno.
 */
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *ddev = minor->dev;
	struct drm_i915_private *i915 = to_i915(ddev);
	struct i915_gem_context *ctx;
	u32 *temp = NULL; /* allocated only when the slice has no table yet */
	int slice = (int)(uintptr_t)attr->private;
	int err;

	if (!HAS_HW_CONTEXTS(ddev))
		return -ENXIO;

	err = l3_access_valid(ddev, offset);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(ddev);
	if (err)
		return err;

	if (!i915->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&ddev->struct_mutex);
			return -ENOMEM;
		}
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	*/
	if (temp)
		i915->l3_parity.remap_info[slice] = temp;

	memcpy(i915->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &i915->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&ddev->struct_mutex);

	return count;
}

/* Binary sysfs files exposing the L3 parity remap tables, one per slice.
 * ->private carries the slice index consumed by i915_l3_read/write. */
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

266
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
267 268
				    struct device_attribute *attr, char *buf)
{
269
	struct drm_minor *minor = dev_to_drm_minor(kdev);
270
	struct drm_device *dev = minor->dev;
271
	struct drm_i915_private *dev_priv = to_i915(dev);
272 273
	int ret;

274 275
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

276 277
	intel_runtime_pm_get(dev_priv);

278
	mutex_lock(&dev_priv->rps.hw_lock);
279
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
280
		u32 freq;
281
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
282
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
283 284
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
285 286 287
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
288 289 290
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
291
		ret = intel_gpu_freq(dev_priv, ret);
292 293 294 295 296 297 298 299 300 301 302 303 304
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
305
	struct drm_i915_private *dev_priv = to_i915(dev);
306 307 308 309 310 311 312
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
313
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
314
	mutex_unlock(&dev_priv->rps.hw_lock);
315

316 317
	intel_runtime_pm_put(dev_priv);

318
	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
319 320
}

321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355
/* sysfs read: the frequency (MHz) used when a client requests a boost. */
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_i915_private *dev_priv = to_i915(minor->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
}

/* sysfs write: set the boost frequency (MHz), validated against the
 * static hardware min/max limits. Returns @count or a negative errno. */
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	/* to_i915() for consistency with the rest of this file,
	 * instead of poking dev->dev_private directly */
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(dev_priv, val);
	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
		return -EINVAL;

	mutex_lock(&dev_priv->rps.hw_lock);
	dev_priv->rps.boost_freq = val;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

356 357 358
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
359
	struct drm_minor *minor = dev_to_drm_minor(kdev);
360
	struct drm_device *dev = minor->dev;
361
	struct drm_i915_private *dev_priv = to_i915(dev);
362

363 364 365
	return snprintf(buf, PAGE_SIZE,
			"%d\n",
			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
366 367
}

368 369
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
370
	struct drm_minor *minor = dev_to_drm_minor(kdev);
371
	struct drm_device *dev = minor->dev;
372
	struct drm_i915_private *dev_priv = to_i915(dev);
373 374
	int ret;

375 376
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

377
	mutex_lock(&dev_priv->rps.hw_lock);
378
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
379
	mutex_unlock(&dev_priv->rps.hw_lock);
380

381
	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
382 383
}

384 385 386 387
/*
 * sysfs write: set the maximum frequency softlimit (MHz).
 *
 * The value is validated against the hardware limits and the current
 * minimum softlimit, then the RPS request is re-evaluated so interrupt
 * limits and PMINTRMSK pick up the new ceiling.
 * Returns @count on success or a negative errno.
 */
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Make sure any pending RPS resume has completed first */
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev_priv, val);

	ret = count;
unlock:
	mutex_unlock(&dev_priv->rps.hw_lock);
	intel_runtime_pm_put(dev_priv);

	return ret;
}

436 437
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
438
	struct drm_minor *minor = dev_to_drm_minor(kdev);
439
	struct drm_device *dev = minor->dev;
440
	struct drm_i915_private *dev_priv = to_i915(dev);
441 442
	int ret;

443 444
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

445
	mutex_lock(&dev_priv->rps.hw_lock);
446
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
447
	mutex_unlock(&dev_priv->rps.hw_lock);
448

449
	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
450 451
}

452 453 454 455
/*
 * sysfs write: set the minimum frequency softlimit (MHz).
 *
 * The value is validated against the hardware limits and the current
 * maximum softlimit, then the RPS request is re-evaluated so interrupt
 * limits and PMINTRMSK pick up the new floor.
 * Returns @count on success or a negative errno.
 */
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Make sure any pending RPS resume has completed first */
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev_priv, val);

	ret = count;
unlock:
	mutex_unlock(&dev_priv->rps.hw_lock);
	intel_runtime_pm_put(dev_priv);

	return ret;
}

501
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
502
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
503
static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
504 505
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
506

507
static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
508 509 510 511 512 513 514 515 516

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
517
	struct drm_minor *minor = dev_to_drm_minor(kdev);
518
	struct drm_device *dev = minor->dev;
519
	struct drm_i915_private *dev_priv = to_i915(dev);
520
	u32 val;
521

522 523 524 525 526 527 528
	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
529
		BUG();
530

531
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
532 533
}

534
static const struct attribute *gen6_attrs[] = {
535
	&dev_attr_gt_act_freq_mhz.attr,
536
	&dev_attr_gt_cur_freq_mhz.attr,
537
	&dev_attr_gt_boost_freq_mhz.attr,
538 539
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
540 541 542
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
543 544 545
	NULL,
};

546
static const struct attribute *vlv_attrs[] = {
547
	&dev_attr_gt_act_freq_mhz.attr,
548
	&dev_attr_gt_cur_freq_mhz.attr,
549
	&dev_attr_gt_boost_freq_mhz.attr,
550 551
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
552 553 554
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
555 556 557 558
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

559 560 561 562 563
/*
 * error_state_read - sysfs binary read of the captured GPU error state.
 *
 * Formats the current error state into a string buffer and copies the
 * requested window into @buf. Returns bytes copied or a negative errno.
 */
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t copied = 0;
	int err;

	memset(&error_priv, 0, sizeof(error_priv));

	err = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (err)
		return err;

	/* Take a reference on the error state while we format it */
	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	err = i915_error_state_to_str(&error_str, &error_priv);
	if (err)
		goto out;

	copied = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, copied);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return err ?: copied;
}

/*
 * error_state_write - any write to the "error" file clears the captured
 * GPU error state. The written data itself is ignored.
 */
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int err;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	err = mutex_lock_interruptible(&dev->struct_mutex);
	if (err)
		return err;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

/* Binary sysfs file exposing (read) and clearing (write) the GPU error
 * state. size 0 means "unbounded" for a sysfs binary attribute. */
static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

B
Ben Widawsky 已提交
624 625 626 627
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

628
#ifdef CONFIG_PM
629
	if (HAS_RC6(dev)) {
630
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
631 632 633 634
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
635 636 637 638 639 640
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
641
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
642 643 644 645 646
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
647
#endif
648
	if (HAS_L3_DPF(dev)) {
649
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
650 651
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");
652 653

		if (NUM_L3_SLICES(dev) > 1) {
654
			ret = device_create_bin_file(dev->primary->kdev,
655 656 657 658
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
659
	}
660

661
	ret = 0;
662
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
663
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
664
	else if (INTEL_INFO(dev)->gen >= 6)
665
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
666 667
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");
668

669
	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
670 671 672
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
B
Ben Widawsky 已提交
673 674 675 676
}

void i915_teardown_sysfs(struct drm_device *dev)
{
677
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
678
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
679
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
680
	else
681 682 683
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
684
#ifdef CONFIG_PM
685
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
686
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
687
#endif
B
Ben Widawsky 已提交
688
}