/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
42
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
43
	u32 ret;
B
Ben Widawsky 已提交
44 45 46 47

	if (!intel_enable_rc6(dev))
		return 0;

48 49
	intel_runtime_pm_get(dev_priv);

50
	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
51
	if (IS_VALLEYVIEW(dev)) {
52
		u32 reg, czcount_30ns;
53

54 55 56 57 58 59 60 61 62
		if (IS_CHERRYVIEW(dev))
			reg = CHV_CLK_CTL1;
		else
			reg = VLV_CLK_CTL2;

		czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;

		if (!czcount_30ns) {
			WARN(!czcount_30ns, "bogus CZ count value");
63 64
			ret = 0;
			goto out;
65
		}
66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84

		units = 0;
		div = 1000000ULL;

		if (IS_CHERRYVIEW(dev)) {
			/* Special case for 320Mhz */
			if (czcount_30ns == 1) {
				div = 10000000ULL;
				units = 3125ULL;
			} else {
				/* chv counts are one less */
				czcount_30ns += 1;
			}
		}

		if (units == 0)
			units = DIV_ROUND_UP_ULL(30ULL * bias,
						 (u64)czcount_30ns);

85 86 87
		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

88
		div = div * bias;
89 90 91
	}

	raw_time = I915_READ(reg) * units;
92 93 94 95 96
	ret = DIV_ROUND_UP_ULL(raw_time, div);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
B
Ben Widawsky 已提交
97 98 99
}

static ssize_t
B
Ben Widawsky 已提交
100
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
B
Ben Widawsky 已提交
101
{
102
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
103
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
B
Ben Widawsky 已提交
104 105 106
}

static ssize_t
B
Ben Widawsky 已提交
107
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
B
Ben Widawsky 已提交
108
{
109
	struct drm_minor *dminor = dev_get_drvdata(kdev);
B
Ben Widawsky 已提交
110
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
111
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
B
Ben Widawsky 已提交
112 113 114
}

static ssize_t
B
Ben Widawsky 已提交
115
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
B
Ben Widawsky 已提交
116
{
117
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
B
Ben Widawsky 已提交
118
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
119 120
	if (IS_VALLEYVIEW(dminor->dev))
		rc6p_residency = 0;
121
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
B
Ben Widawsky 已提交
122 123 124
}

static ssize_t
B
Ben Widawsky 已提交
125
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
B
Ben Widawsky 已提交
126
{
127
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
B
Ben Widawsky 已提交
128
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
129 130
	if (IS_VALLEYVIEW(dminor->dev))
		rc6pp_residency = 0;
131
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
B
Ben Widawsky 已提交
132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150
}

/* Read-only RC6 files; merged into the device's "power" sysfs group below. */
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

/* Grouped under power_group_name so the files appear under .../power/ */
static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs =  rc6_attrs
};
#endif

/*
 * Validate an l3_parity access: the device must support L3 dynamic parity
 * fixing, and the offset must be dword-aligned and inside the remap table.
 */
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;
	if (offset % 4 != 0)
		return -EINVAL;
	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;
	return 0;
}

/*
 * i915_l3_read - sysfs read handler for the l3_parity(_slice_1) bin files
 *
 * Copies out the cached L3 remap table for the slice encoded in
 * attr->private, or zeroes if no remap has been written for that slice.
 */
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	u32 *remap_info;
	int ret;

	/* Only whole dwords are meaningful */
	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	remap_info = dev_priv->l3_parity.remap_info[slice];
	if (remap_info)
		memcpy(buf, remap_info + offset / 4, count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

/*
 * i915_l3_write - sysfs write handler for the l3_parity(_slice_1) bin files
 *
 * Stores a new remap table for the selected slice and marks every context
 * so the remap is applied on the next context switch.
 */
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* allocated only if the slice has no table yet */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			ret = -ENOMEM;
			goto out_unlock;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		goto out_unlock;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	*/
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + offset / 4, buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1 << slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;

out_unlock:
	mutex_unlock(&drm_dev->struct_mutex);
	return ret;
}

/* "l3_parity": read/write the L3 remap table for slice 0 */
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0 /* slice index, decoded in i915_l3_read/write */
};

/* "l3_parity_slice_1": same interface for the second L3 slice */
static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

279 280 281
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
282
	struct drm_minor *minor = dev_to_drm_minor(kdev);
283 284 285 286
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

287 288
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

289 290
	intel_runtime_pm_get(dev_priv);

291
	mutex_lock(&dev_priv->rps.hw_lock);
292 293
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
294
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
295
		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
296
	} else {
297
		ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
298
	}
299
	mutex_unlock(&dev_priv->rps.hw_lock);
300

301 302
	intel_runtime_pm_put(dev_priv);

303
	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
304 305
}

306 307 308
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
309
	struct drm_minor *minor = dev_to_drm_minor(kdev);
310 311 312 313
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d\n",
314
			vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
315 316
}

317 318
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
319
	struct drm_minor *minor = dev_to_drm_minor(kdev);
320 321 322 323
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

324 325
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

326
	mutex_lock(&dev_priv->rps.hw_lock);
327
	if (IS_VALLEYVIEW(dev_priv->dev))
328
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
329
	else
330
		ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
331
	mutex_unlock(&dev_priv->rps.hw_lock);
332

333
	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
334 335
}

336 337 338 339
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
340
	struct drm_minor *minor = dev_to_drm_minor(kdev);
341 342
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
343
	u32 val;
344 345 346 347 348 349
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

350 351
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

352
	mutex_lock(&dev_priv->rps.hw_lock);
353

354
	if (IS_VALLEYVIEW(dev_priv->dev))
355
		val = vlv_freq_opcode(dev_priv, val);
356
	else
357
		val /= GT_FREQUENCY_MULTIPLIER;
358

359 360
	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
361
	    val < dev_priv->rps.min_freq_softlimit) {
362
		mutex_unlock(&dev_priv->rps.hw_lock);
363 364 365
		return -EINVAL;
	}

366
	if (val > dev_priv->rps.rp0_freq)
367 368 369
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

370
	dev_priv->rps.max_freq_softlimit = val;
371

372
	if (dev_priv->rps.cur_freq > val) {
373 374
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
375
		else
376
			gen6_set_rps(dev, val);
377 378 379 380
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new max_delay and
		 * update the interrupt limits even though frequency request is
		 * unchanged. */
381
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
382
	}
383

384
	mutex_unlock(&dev_priv->rps.hw_lock);
385 386 387 388

	return count;
}

389 390
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
391
	struct drm_minor *minor = dev_to_drm_minor(kdev);
392 393 394 395
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

396 397
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

398
	mutex_lock(&dev_priv->rps.hw_lock);
399
	if (IS_VALLEYVIEW(dev_priv->dev))
400
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
401
	else
402
		ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
403
	mutex_unlock(&dev_priv->rps.hw_lock);
404

405
	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
406 407
}

408 409 410 411
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
412
	struct drm_minor *minor = dev_to_drm_minor(kdev);
413 414
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
415
	u32 val;
416 417 418 419 420 421
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

422 423
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

424
	mutex_lock(&dev_priv->rps.hw_lock);
425

426
	if (IS_VALLEYVIEW(dev))
427
		val = vlv_freq_opcode(dev_priv, val);
428
	else
429 430
		val /= GT_FREQUENCY_MULTIPLIER;

431 432 433
	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
434
		mutex_unlock(&dev_priv->rps.hw_lock);
435 436 437
		return -EINVAL;
	}

438
	dev_priv->rps.min_freq_softlimit = val;
439

440
	if (dev_priv->rps.cur_freq < val) {
441 442 443
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
444
			gen6_set_rps(dev, val);
445 446 447 448
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new min_delay and
		 * update the interrupt limits even though frequency request is
		 * unchanged. */
449
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
450
	}
451

452
	mutex_unlock(&dev_priv->rps.hw_lock);
453 454 455 456 457

	return count;

}

/* Frequency files: cur is read-only, max/min soft limits are writable */
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

/* The three RP-state files share one show routine, dispatching on attr */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
472
	struct drm_minor *minor = dev_to_drm_minor(kdev);
473 474 475 476 477 478 479 480
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
481
	intel_runtime_pm_get(dev_priv);
482
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
483
	intel_runtime_pm_put(dev_priv);
484 485 486
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
487 488 489 490
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
		else
			val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
491
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
492 493 494 495
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
		else
			val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
496
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
497 498 499 500
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
		else
			val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
501 502 503
	} else {
		BUG();
	}
504
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
505 506
}

/* Frequency attribute set for gen6+ platforms */
static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

/* VLV/CHV additionally expose the "rpe" (efficient) frequency */
static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

528 529 530 531 532 533
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{

	struct device *kdev = container_of(kobj, struct device, kobj);
534
	struct drm_minor *minor = dev_to_drm_minor(kdev);
535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
569
	struct drm_minor *minor = dev_to_drm_minor(kdev);
570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

/* "error": read dumps the last GPU error state, any write clears it */
static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

B
Ben Widawsky 已提交
593 594 595 596
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

597
#ifdef CONFIG_PM
598
	if (INTEL_INFO(dev)->gen >= 6) {
599
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
600 601 602 603
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
604
#endif
605
	if (HAS_L3_DPF(dev)) {
606
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
607 608
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");
609 610

		if (NUM_L3_SLICES(dev) > 1) {
611
			ret = device_create_bin_file(dev->primary->kdev,
612 613 614 615
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
616
	}
617

618 619
	ret = 0;
	if (IS_VALLEYVIEW(dev))
620
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
621
	else if (INTEL_INFO(dev)->gen >= 6)
622
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
623 624
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");
625

626
	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
627 628 629
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
B
Ben Widawsky 已提交
630 631 632 633
}

void i915_teardown_sysfs(struct drm_device *dev)
{
634
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
635
	if (IS_VALLEYVIEW(dev))
636
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
637
	else
638 639 640
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
641
#ifdef CONFIG_PM
642
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
643
#endif
B
Ben Widawsky 已提交
644
}