amdgpu_pm.c 100.0 KB
Newer Older
A
Alex Deucher 已提交
1
/*
2 3
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
A
Alex Deucher 已提交
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
25 26 27

#include <drm/drm_debugfs.h>

A
Alex Deucher 已提交
28 29 30 31
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
32
#include "amdgpu_smu.h"
A
Alex Deucher 已提交
33
#include "atom.h"
34
#include <linux/pci.h>
A
Alex Deucher 已提交
35 36
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
37
#include <linux/nospec.h>
38
#include <linux/pm_runtime.h>
39
#include "hwmgr.h"
40

41 42 43 44 45
/*
 * Human-readable names for the AMD_CG_SUPPORT_* clockgating feature flags.
 * The table is terminated by a {0, NULL} sentinel so callers can iterate
 * without knowing its length.
 */
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

72 73 74 75 76 77 78 79 80
/*
 * Maps each PP_HWMON_TEMP channel to the label string exported through
 * the hwmon temp*_label sysfs files (edge, junction, memory sensors).
 */
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

81 82 83
/**
 * DOC: power_dpm_state
 *
84 85 86
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters.  The file power_dpm_state is used for this.
87
 * It accepts the following arguments:
88
 *
89
 * - battery
90
 *
91
 * - balanced
92
 *
93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation.  Selecting battery switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation.  Selecting balanced switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation.  Selecting performance switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 */

115 116 117
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
A
Alex Deucher 已提交
118 119 120
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
121
	enum amd_pm_state_type pm;
122
	int ret;
123

124
	if (amdgpu_in_reset(adev))
125 126
		return -EPERM;

127
	ret = pm_runtime_get_sync(ddev->dev);
128 129
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
130
		return ret;
131
	}
132

133 134
	if (is_support_sw_smu(adev)) {
		if (adev->smu.ppt_funcs->get_current_power_state)
135
			pm = smu_get_current_power_state(&adev->smu);
136 137 138
		else
			pm = adev->pm.dpm.user_state;
	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
139
		pm = amdgpu_dpm_get_current_power_state(adev);
140
	} else {
141
		pm = adev->pm.dpm.user_state;
142
	}
A
Alex Deucher 已提交
143

144 145 146
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

A
Alex Deucher 已提交
147 148 149 150 151
	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

152 153 154 155
/*
 * Sysfs store handler for power_dpm_state: selects the legacy
 * battery/balanced/performance power state (see DOC: power_dpm_state).
 *
 * Returns @count on success, -EPERM while the GPU is in reset, -EINVAL
 * for an unrecognized keyword, or a negative error from runtime PM.
 */
static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type  state;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Match only the leading keyword; sysfs input usually ends in '\n'. */
	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	/* Keep the device awake while the state change is applied. */
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		/* swSMU: just record the user's choice under the pm mutex. */
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		/* powerplay: apply the state through the task dispatcher. */
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		/* legacy dpm: record the state, then recompute clocks. */
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

199 200 201 202 203 204 205

/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters.  The file power_dpm_force_performance_level is
 * used for this.  It accepts the following arguments:
206
 *
207
 * - auto
208
 *
209
 * - low
210
 *
211
 * - high
212
 *
213
 * - manual
214
 *
215
 * - profile_standard
216
 *
217
 * - profile_min_sclk
218
 *
219
 * - profile_min_mclk
220
 *
221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific work loads where you do
 * not want clock or power gating for clock fluctuation to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic.  profile_min_sclk forces the sclk
 * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
 */

259 260 261
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
A
Alex Deucher 已提交
262 263 264
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
265
	enum amd_dpm_forced_level level = 0xff;
266
	int ret;
A
Alex Deucher 已提交
267

268
	if (amdgpu_in_reset(adev))
269 270
		return -EPERM;

271
	ret = pm_runtime_get_sync(ddev->dev);
272 273
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
274
		return ret;
275
	}
276

277 278 279
	if (is_support_sw_smu(adev))
		level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
280 281 282 283
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

284 285 286
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

287
	return snprintf(buf, PAGE_SIZE, "%s\n",
R
Rex Zhu 已提交
288 289 290 291 292 293 294 295 296
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
A
Alex Deucher 已提交
297 298
}

299 300 301 302
/*
 * Sysfs store handler for power_dpm_force_performance_level: forces the
 * DPM level named in @buf (see DOC: power_dpm_force_performance_level).
 *
 * Returns @count on success (including the no-op case where the level is
 * already active), -EPERM while the GPU is in reset, -EINVAL for an
 * unrecognized keyword or a backend failure, or a negative error from
 * runtime PM.
 */
static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Prefix-match the requested level keyword. */
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	}  else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		current_level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	/* Nothing to do if the requested level is already in force. */
	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	/* On Raven (non-Raven2 APUs), GFXOFF must be disabled while in
	 * manual mode and re-enabled when leaving it.
	 */
	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	/* profile_exit setting is valid only when current mode is in profile mode */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_force_performance_level(&adev->smu, level);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		/* Refuse the change while thermal throttling is active. */
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			/* Record the new level only after it took effect. */
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

405 406 407 408 409 410 411
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
412
	int i, buf_len, ret;
413

414
	if (amdgpu_in_reset(adev))
415 416
		return -EPERM;

417
	ret = pm_runtime_get_sync(ddev->dev);
418 419
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
420
		return ret;
421
	}
422

423 424 425 426
	if (is_support_sw_smu(adev)) {
		ret = smu_get_power_num_states(&adev->smu, &data);
		if (ret)
			return ret;
427
	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
428
		amdgpu_dpm_get_pp_num_states(adev, &data);
429 430 431
	} else {
		memset(&data, 0, sizeof(data));
	}
432

433 434 435
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453
	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
454
	struct smu_context *smu = &adev->smu;
455
	enum amd_pm_state_type pm = 0;
456
	int i = 0, ret = 0;
457

458
	if (amdgpu_in_reset(adev))
459 460
		return -EPERM;

461
	ret = pm_runtime_get_sync(ddev->dev);
462 463
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
464
		return ret;
465
	}
466

467 468 469 470 471 472
	if (is_support_sw_smu(adev)) {
		pm = smu_get_current_power_state(smu);
		ret = smu_get_power_num_states(smu, &data);
		if (ret)
			return ret;
	} else if (adev->powerplay.pp_funcs->get_current_power_state
473
		 && adev->powerplay.pp_funcs->get_pp_num_states) {
474 475
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
476
	}
477

478 479 480
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

481 482 483
	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
484 485
	}

486 487 488
	if (i == data.nums)
		i = -EINVAL;

489 490 491 492 493 494 495 496 497 498
	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

499
	if (amdgpu_in_reset(adev))
500 501
		return -EPERM;

502 503 504
	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
505 506 507 508 509 510 511 512 513 514 515
		return snprintf(buf, PAGE_SIZE, "\n");
}

/*
 * Sysfs store handler for pp_force_state: forces a specific power state
 * by index (as listed by pp_num_states), or disables forcing when given
 * an empty write.  Only supported on the powerplay backend; swSMU
 * silently clears the forcing flag.
 *
 * Returns @count on success, -EPERM while the GPU is in reset, -EINVAL
 * for a malformed or out-of-range index, or a negative error from
 * runtime PM.
 */
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* A bare newline (length 1) disables state forcing. */
	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
			adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		/* Clamp the index against speculative out-of-bounds use
		 * (Spectre-v1 mitigation) before indexing data.states.
		 */
		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables.  The file pp_table is used for this.  Reading the file
 * will dump the current power play table.  Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
 */

570 571 572 573 574 575 576
/*
 * Sysfs show handler for pp_table: dumps the active powerplay table
 * (see DOC: pp_table).  The table buffer is owned by the backend; it is
 * only copied here, not freed.
 *
 * Returns the number of bytes copied into @buf (capped at PAGE_SIZE - 1),
 * 0 when no backend exposes a table, -EPERM while the GPU is in reset,
 * or a negative error from runtime PM / the backend query.
 */
static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	/* sysfs reads are limited to one page. */
	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

/*
 * Sysfs store handler for pp_table: uploads a new powerplay table and
 * re-initializes powerplay with it (see DOC: pp_table).
 *
 * Returns @count on success, -EPERM while the GPU is in reset, or a
 * negative error from runtime PM / the swSMU table upload.
 */
static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->set_pp_table)
		/* powerplay path reports no error to the caller here. */
		amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

648 649 650 651 652 653 654
/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state.  The pp_od_clk_voltage is used for
 * this.
 *
655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700
 * Note that the actual memory controller clock rate are exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (Mhz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 Mhz
 *
 * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz
 *
 * data rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 Mhz
 *
 * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz
 *
 * data rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
701 702
 * < For Vega10 and previous ASICs >
 *
703
 * Reading the file will display:
704
 *
705
 * - a list of engine clock levels and voltages labeled OD_SCLK
706
 *
707
 * - a list of memory clock levels and voltages labeled OD_MCLK
708
 *
709 710 711 712 713 714 715 716 717 718 719
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV.  When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes.  If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
720
 *
721
 * < For Vega20 and newer ASICs >
722 723 724 725 726 727 728
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - maximum memory clock labeled OD_MCLK
 *
729
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745
 *   They can be used to calibrate the sclk voltage curve.
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 if to set minimum clock. And 1 if to set maximum
 *   clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
 *   "m 1 800" will update maximum mclk to be 800Mhz.
 *
 *   For sclk voltage curve, enter the new values by writing a
746 747 748 749 750
 *   string that contains "vc point clock voltage" to the file. The
 *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
 *   update point1 with clock set as 300Mhz and voltage as
 *   600mV. "vc 2 1000 1000" will update point3 with clock set
 *   as 1000Mhz and voltage 1000mV.
751 752 753 754 755 756 757
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
758 759
 */

760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775
/*
 * Sysfs store handler for pp_od_clk_voltage: parses an overdrive command
 * ("s"/"m"/"vc" edits, "r" restore, "c" commit — see DOC:
 * pp_od_clk_voltage) followed by up to 64 numeric parameters, and hands
 * it to the active backend.
 *
 * Returns @count on success, -EPERM while the GPU is in reset, -EINVAL
 * for a malformed command or backend failure, or a negative error from
 * runtime PM.
 */
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Input (plus its NUL) must fit in buf_cpy[128]. */
	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	/* Skip the command letter(s) and the whitespace after them. */
	if (type == PP_OD_EDIT_VDDC_CURVE)
		tmp_str++;
	while (isspace(*++tmp_str));

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);

		/* Fix: bound-check before writing parameter[parameter_size]
		 * instead of trusting the input layout.
		 */
		if (parameter_size >= ARRAY_SIZE(parameter))
			return -EINVAL;

		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		/* Fix: strsep() sets tmp_str to NULL when the last token had
		 * no trailing delimiter (e.g. input written without a final
		 * newline); the old code then dereferenced NULL below.
		 */
		if (!tmp_str)
			break;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_od_edit_dpm_table(&adev->smu, type,
					    parameter, parameter_size);

		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else {
		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
						parameter, parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		/* "c" additionally kicks a power-state readjustment. */
		if (type == PP_OD_COMMIT_DPM_TABLE) {
			if (adev->powerplay.pp_funcs->dispatch_tasks) {
				amdgpu_dpm_dispatch_task(adev,
						AMD_PP_TASK_READJUST_POWER_STATE,
						NULL);
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return count;
			} else {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

/*
 * Sysfs show handler for pp_od_clk_voltage: dumps the overdrive tables
 * (OD_SCLK, OD_MCLK, OD_VDDC_CURVE, OD_RANGE — see DOC:
 * pp_od_clk_voltage), or an empty line when no backend supports it.
 *
 * Returns the number of bytes written to @buf, -EPERM while the GPU is
 * in reset, or a negative error from runtime PM resume.
 */
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* Each section is appended after the previous one in @buf. */
	if (is_support_sw_smu(adev)) {
		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

898
/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it
 * is only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - Current ppfeature masks
 * - List of all the supported powerplay features with their naming,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit from the original ppfeature masks and input the
 * new ppfeature masks.
 */
914 915 916 917
/*
 * sysfs write for pp_features: set a new powerplay feature enablement mask.
 * Input is the full 64-bit mask (any base kstrtou64 accepts).
 *
 * Returns @count on success, a negative errno on parse/runtime-PM failure,
 * or -EINVAL if the backend rejects the mask.
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t featuremask;
	int ret;

	/* Do not touch the hardware while a GPU reset is in progress. */
	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	pr_debug("featuremask = 0x%llx\n", featuremask);

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/*
	 * Route to the matching backend.  The success/failure handling is
	 * identical for both, so the runtime-PM release is done once on a
	 * common exit path instead of being duplicated per branch.
	 */
	if (is_support_sw_smu(adev))
		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
	else if (adev->powerplay.pp_funcs->set_ppfeature_status)
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
	else
		ret = 0;	/* no backend: silently accept, as before */

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return ret ? -EINVAL : count;
}

960 961 962
static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
963 964 965
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
966 967
	ssize_t size;
	int ret;
968

969
	if (amdgpu_in_reset(adev))
970 971
		return -EPERM;

972
	ret = pm_runtime_get_sync(ddev->dev);
973 974
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
975
		return ret;
976
	}
977 978 979 980 981 982 983 984 985 986

	if (is_support_sw_smu(adev))
		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
987

988
	return size;
989 990
}

991
/**
992
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
993 994 995
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
996 997
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
998
 *
999 1000
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
1001
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
1002 1003 1004 1005 1006
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
1007
 * power_dpm_force_performance_level.
1008
 * Secondly, enter a new value for each level by inputting a string that
1009
 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
1010 1011 1012 1013 1014 1015 1016
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
1017 1018
 *
 * NOTE: change to the dcefclk max dpm level is not supported now
1019 1020
 */

1021 1022 1023 1024 1025 1026
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
1027 1028
	ssize_t size;
	int ret;
1029

1030
	if (amdgpu_in_reset(adev))
1031 1032
		return -EPERM;

1033
	ret = pm_runtime_get_sync(ddev->dev);
1034 1035
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
1036
		return ret;
1037
	}
1038

1039
	if (is_support_sw_smu(adev))
1040
		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
1041
	else if (adev->powerplay.pp_funcs->print_clock_levels)
1042
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
1043
	else
1044 1045 1046 1047 1048 1049
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
1050 1051
}

K
Kees Cook 已提交
1052 1053 1054 1055 1056 1057 1058
/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)

/*
 * amdgpu_read_mask - parse a user supplied list of DPM level indices
 * @buf:   user input, a whitespace-separated list of integers (e.g. "4 5 6")
 * @count: number of bytes in @buf
 * @mask:  output bitmask with one bit set per listed level
 *
 * Returns 0 on success, -EINVAL on malformed or out-of-range input.
 */
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	/* Work on a bounded, NUL-terminated copy (strsep modifies it). */
	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while (tmp[0]) {
		sub_str = strsep(&tmp, delimiter);
		if (strlen(sub_str)) {
			ret = kstrtol(sub_str, 0, &level);
			if (ret)
				return -EINVAL;
			/*
			 * The mask has only 32 bits.  Reject out-of-range
			 * levels instead of shifting by a negative or
			 * >= 32 amount, which is undefined behavior.
			 */
			if (level < 0 || level > 31)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}

/*
 * sysfs write for pp_dpm_sclk: force the set of enabled SCLK DPM levels.
 * Input is a whitespace-separated list of level indices; requires the
 * manual performance level mode to take effect.
 */
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Parse "4 5 6"-style input into a level bitmask. */
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* ret stays 0 from the successful parse if neither backend applies. */
	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

/* sysfs read for pp_dpm_mclk: list the MCLK DPM levels, marking the active one. */
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Wake the device before touching clock state. */
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/*
 * sysfs write for pp_dpm_mclk: force the set of enabled MCLK DPM levels.
 * Input is a whitespace-separated list of level indices; requires the
 * manual performance level mode to take effect.
 */
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t mask = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Parse "4 5 6"-style input into a level bitmask. */
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* ret stays 0 from the successful parse if neither backend applies. */
	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

1193 1194 1195 1196 1197 1198
/* sysfs read for pp_dpm_socclk: list the SOCCLK DPM levels (Vega10+). */
static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/* sysfs write for pp_dpm_socclk: force the set of enabled SOCCLK DPM levels. */
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Parse "4 5 6"-style input into a level bitmask. */
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

1263 1264 1265 1266 1267 1268
/* sysfs read for pp_dpm_fclk: list the FCLK DPM levels (Vega20+). */
static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/* sysfs write for pp_dpm_fclk: force the set of enabled FCLK DPM levels. */
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Parse "4 5 6"-style input into a level bitmask. */
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

1333 1334 1335 1336 1337 1338
/* sysfs read for pp_dpm_dcefclk: list the DCEFCLK DPM levels (Vega10+). */
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/*
 * sysfs write for pp_dpm_dcefclk: force the set of enabled DCEFCLK DPM
 * levels.  NOTE(per the DOC block above): changing the dcefclk max dpm
 * level is not supported.
 */
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Parse "4 5 6"-style input into a level bitmask. */
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

1403 1404 1405 1406 1407 1408
/* sysfs read for pp_dpm_pcie: list the PCIe link speed/width DPM levels. */
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/*
 * sysfs write for pp_dpm_pcie: force the set of enabled PCIe DPM levels.
 * Input is a whitespace-separated list of level indices.
 */
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* Parse "4 5 6"-style input into a level bitmask. */
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else
		/*
		 * Explicitly report success like the socclk/fclk/dcefclk
		 * setters do, rather than relying on ret still holding 0
		 * from the parse above.
		 */
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

1473 1474 1475 1476 1477 1478 1479
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
1480
	int ret;
1481

1482
	if (amdgpu_in_reset(adev))
1483 1484
		return -EPERM;

1485
	ret = pm_runtime_get_sync(ddev->dev);
1486 1487
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
1488
		return ret;
1489
	}
1490

1491
	if (is_support_sw_smu(adev))
1492
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
1493
	else if (adev->powerplay.pp_funcs->get_sclk_od)
1494 1495
		value = amdgpu_dpm_get_sclk_od(adev);

1496 1497 1498
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511
	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

/* sysfs write for pp_sclk_od: set a new SCLK overdrive percentage and
 * trigger a power-state re-adjustment so it takes effect. */
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		/* Re-evaluate the power state so the new OD value applies. */
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

1546 1547 1548 1549 1550 1551 1552
/* sysfs read for pp_mclk_od: report the current MCLK overdrive percentage. */
static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* value stays 0 when no backend exposes it. */
	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

/* sysfs write for pp_mclk_od: set a new MCLK overdrive percentage and
 * trigger a power-state re-adjustment so it takes effect. */
static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		/* Re-evaluate the power state so the new OD value applies. */
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638
/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state.  The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level.  Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics.  To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter.  Due to differences across asic families
 * the heuristic parameters vary from family to family.
 *
 */

1639 1640 1641 1642 1643 1644
/* sysfs read for pp_power_profile_mode: list the predefined power profiles
 * and their heuristic settings, marking the active one. */
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_get_power_profile_mode(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}


/*
 * sysfs write for pp_power_profile_mode: select a predefined power profile,
 * or define a custom one ("<PP_SMC_POWER_PROFILE_CUSTOM> <p0> <p1> ...").
 *
 * Returns @count on success, -EINVAL on malformed input or backend failure.
 */
static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* The first character selects the profile number. */
	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		/* Skip the profile number and following whitespace. */
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while (tmp_str[0]) {
			sub_str = strsep(&tmp_str, delimiter);
			/*
			 * The last slot of parameter[] is reserved for
			 * profile_mode (appended below); reject inputs with
			 * too many values instead of overrunning the array.
			 */
			if (parameter_size >=
			    (sizeof(parameter) / sizeof(parameter[0])) - 1)
				return -EINVAL;
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	/* The profile number rides along as the last parameter. */
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}

1736
/**
1737
 * DOC: gpu_busy_percent
1738 1739 1740 1741 1742 1743
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage.  The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
1744 1745 1746
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
1747 1748 1749 1750 1751
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

1752
	if (amdgpu_in_reset(adev))
1753 1754
		return -EPERM;

1755
	r = pm_runtime_get_sync(ddev->dev);
1756 1757
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
1758
		return r;
1759
	}
1760

1761 1762 1763
	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);
1764

1765 1766 1767
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

1768 1769 1770 1771 1772 1773
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

1774 1775 1776 1777 1778 1779 1780 1781
/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage.  The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
1782 1783 1784
/* sysfs read for mem_busy_percent: query the VRAM load sensor (percent). */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829
/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage
 */
/* sysfs read for pcie_bw: report PCIe messages received/sent in the last
 * second plus the max payload size, for bandwidth estimation. */
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* APUs do not go through the PCIe bus; no data to report. */
	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync took a reference even on failure; drop it. */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* count0/count1: received/sent message counters from perf counters. */
	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE,	"%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
}

1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873
/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU
 * The file unique_id is used for this.
 * This will provide a Unique ID that will persist from machine to machine
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older)
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

1874
	if (amdgpu_in_reset(adev))
1875 1876
		return -EPERM;

1877 1878 1879 1880 1881 1882
	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

	return 0;
}

1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949
/**
 * DOC: thermal_throttling_logging
 *
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether it is enabled and if so,
 * the log frequency.
 *
 * Reading back the file shows you the status(enabled or disabled) and
 * the interval(in seconds) between each thermal logging.
 *
 * Writing an integer to the file, sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are ignored.
 */
/* sysfs read for thermal_throttling_logging: show whether throttling
 * logging is enabled and the interval (in seconds) between log entries. */
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	/* interval is stored as (seconds - 1) * HZ; convert back for display. */
	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
			adev->ddev->unique,
			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			adev->throttling_logging_rs.interval / HZ + 1);
}

/*
 * sysfs write for thermal_throttling_logging: set a new logging interval in
 * seconds (1..3600).  A value < 1 disables logging; values > 3600 are
 * rejected with -EINVAL.
 */
static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	long throttling_logging_interval;
	unsigned long flags;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		/* Manipulating ratelimit internals requires its raw lock. */
		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
		/*
		 * Reset the ratelimit timer internals.
		 * This can effectively restart the timer.
		 */
		adev->throttling_logging_rs.interval =
			(throttling_logging_interval - 1) * HZ;
		adev->throttling_logging_rs.begin = 0;
		adev->throttling_logging_rs.printed = 0;
		adev->throttling_logging_rs.missed = 0;
		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);

		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}

1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971
/**
 * DOC: gpu_metrics
 *
 * The amdgpu driver provides a sysfs API for retrieving current gpu
 * metrics data. The file gpu_metrics is used for this. Reading the
 * file will dump all the current gpu metrics data.
 *
 * These data include temperature, frequency, engines utilization,
 * power consume, throttler status, fan speed and cpu core statistics(
 * available for APU only). That's it will give a snapshot of all sensors
 * at the same time.
 */
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	void *gpu_metrics;
	ssize_t size = 0;
	int ret;

1972
	if (amdgpu_in_reset(adev))
1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
	else if (adev->powerplay.pp_funcs->get_gpu_metrics)
		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);

	if (size <= 0)
		goto out;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, gpu_metrics, size);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

2001 2002
/* Table of power-management sysfs attributes created per device.
 * ATTR_FLAG_BASIC entries are created on bare-metal; ATTR_FLAG_ONEVF
 * entries are additionally exposed under one-VF SR-IOV mode.
 * Per-ASIC applicability is decided later by default_attr_update(). */
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,		ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics,				ATTR_FLAG_BASIC),
};

/*
 * Default per-attribute visibility callback: decide whether a given
 * sysfs attribute is supported on this ASIC/configuration.
 *
 * @adev:   the amdgpu device
 * @attr:   attribute being evaluated
 * @mask:   requested flag mask (e.g. basic vs. one-VF attributes)
 * @states: out - set to ATTR_STATE_UNSUPPORTED to hide the attribute
 *
 * Always returns 0; the decision is communicated through @states.
 */
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	enum amd_asic_type asic_type = adev->asic_type;

	/* attributes outside the requested mask are never exposed */
	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		/* SOC clock control needs Vega10 or newer */
		if (asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		/* DCEF clock: Vega10+, but Arcturus has no display block */
		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		/* overdrive interface only when OD is enabled in firmware/driver */
		*states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (asic_type != CHIP_VEGA10 &&
		    asic_type != CHIP_VEGA20 &&
		    asic_type != CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (asic_type < CHIP_VEGA12)
			*states = ATTR_STATE_UNSUPPORTED;
	}

	if (asic_type == CHIP_ARCTURUS) {
		/* Arcturus does not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

#undef DEVICE_ATTR_IS

	return 0;
}


static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
2097
				     uint32_t mask, struct list_head *attr_list)
2098 2099 2100 2101
{
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
2102 2103 2104
	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
	struct amdgpu_device_attr_entry *attr_entry;

2105
	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2106
			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2107 2108 2109 2110 2111

	BUG_ON(!attr);

	attr_update = attr->attr_update ? attr_update : default_attr_update;

2112
	ret = attr_update(adev, attr, mask, &attr_states);
2113 2114 2115 2116 2117 2118
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

2119
	if (attr_states == ATTR_STATE_UNSUPPORTED)
2120 2121 2122 2123 2124 2125 2126 2127
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
	}

2128 2129 2130 2131 2132 2133 2134 2135
	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
	if (!attr_entry)
		return -ENOMEM;

	attr_entry->attr = attr;
	INIT_LIST_HEAD(&attr_entry->entry);

	list_add_tail(&attr_entry->entry, attr_list);
2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146

	return ret;
}

/* Remove a single previously created PM sysfs attribute from @adev. */
static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	device_remove_file(adev->dev, &attr->dev_attr);
}

2147 2148 2149
static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list);

2150 2151 2152
/*
 * Create a whole table of PM sysfs attributes. On any failure, every
 * attribute created so far (tracked on @attr_list) is torn down again.
 */
static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask,
					    struct list_head *attr_list)
{
	uint32_t idx;
	int ret;

	for (idx = 0; idx < counts; idx++) {
		ret = amdgpu_device_attr_create(adev, &attrs[idx], mask, attr_list);
		if (ret) {
			/* unwind everything created before the failure */
			amdgpu_device_attr_remove_groups(adev, attr_list);
			return ret;
		}
	}

	return 0;
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2174
					     struct list_head *attr_list)
2175
{
2176
	struct amdgpu_device_attr_entry *entry, *entry_tmp;
2177

2178 2179 2180 2181 2182 2183 2184 2185
	if (list_empty(attr_list))
		return ;

	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
		amdgpu_device_attr_remove(adev, entry->attr);
		list_del(&entry->entry);
		kfree(entry);
	}
2186
}
2187

A
Alex Deucher 已提交
2188 2189 2190 2191 2192
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
2193
	int channel = to_sensor_dev_attr(attr)->index;
2194
	int r, temp = 0, size = sizeof(temp);
A
Alex Deucher 已提交
2195

2196
	if (amdgpu_in_reset(adev))
2197 2198
		return -EPERM;

2199 2200 2201
	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

2202
	r = pm_runtime_get_sync(adev->ddev->dev);
2203 2204
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2205
		return r;
2206
	}
2207

2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222
	switch (channel) {
	case PP_TEMP_JUNCTION:
		/* get current junction temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
		/* get current edge temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
		/* get current memory temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
2223 2224 2225
		break;
	default:
		r = -EINVAL;
2226 2227
		break;
	}
A
Alex Deucher 已提交
2228

2229 2230 2231 2232 2233 2234
	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

A
Alex Deucher 已提交
2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253
	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

/* hwmon temp1 threshold: index 1 selects the hysteresis (min) value,
 * index 0 the critical (max) value. */
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp = hyst ? adev->pm.dpm.thermal.min_temp :
			  adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285
/* hwmon hotspot (junction) temperature threshold: hysteresis (min) when
 * the attribute index is set, critical max otherwise. */
static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp = hyst ? adev->pm.dpm.thermal.min_hotspot_temp :
			  adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

/* hwmon memory temperature threshold: hysteresis (min) when the
 * attribute index is set, critical max otherwise. */
static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp = hyst ? adev->pm.dpm.thermal.min_mem_temp :
			  adev->pm.dpm.thermal.max_mem_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297
/* hwmon temp[1-3]_label: print the channel's label string. */
static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	return (channel < PP_TEMP_MAX) ?
		snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label) :
		-EINVAL;
}

2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323
/* hwmon temp[1-3]_emergency: report the per-channel emergency limit. */
static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int temp;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
		break;
	case PP_TEMP_EDGE:
		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
		break;
	case PP_TEMP_MEM:
		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
		break;
	default:
		/* unreachable given the PP_TEMP_MAX check above */
		temp = 0;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

A
Alex Deucher 已提交
2324 2325 2326 2327 2328 2329
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
2330 2331
	int ret;

2332
	if (amdgpu_in_reset(adev))
2333 2334
		return -EPERM;

2335
	ret = pm_runtime_get_sync(adev->ddev->dev);
2336 2337
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2338
		return ret;
2339
	}
2340

2341 2342 2343
	if (is_support_sw_smu(adev)) {
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	} else {
2344 2345 2346
		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
2347
			return -EINVAL;
2348
		}
A
Alex Deucher 已提交
2349

2350 2351
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	}
A
Alex Deucher 已提交
2352

2353 2354 2355
	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

2356
	return sprintf(buf, "%i\n", pwm_mode);
A
Alex Deucher 已提交
2357 2358 2359 2360 2361 2362 2363 2364
}

/* hwmon pwm1_enable store: set the fan control mode from user input. */
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct device *pmdev = adev->ddev->dev;
	int value;
	int err;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	err = pm_runtime_get_sync(pmdev);
	if (err < 0) {
		pm_runtime_put_autosuspend(pmdev);
		return err;
	}
	err = 0;

	if (is_support_sw_smu(adev))
		smu_set_fan_control_mode(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_control_mode)
		amdgpu_dpm_set_fan_control_mode(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(pmdev);
	pm_runtime_put_autosuspend(pmdev);

	return err ? err : count;
}

/* hwmon pwm1_min: the PWM duty range is fixed at 0..255. */
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

/* hwmon pwm1_max: upper bound of the PWM duty range. */
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

/*
 * hwmon pwm1 store: set the fan speed. Input is 0..255; the PM backend
 * takes a percentage. Requires manual fan control mode to be active.
 */
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct device *pmdev = adev->ddev->dev;
	u32 value;
	u32 pwm_mode;
	int err;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(pmdev);
	if (err < 0) {
		pm_runtime_put_autosuspend(pmdev);
		return err;
	}

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pr_info("manual fan speed control should be enabled first\n");
		err = -EINVAL;
		goto out;
	}

	err = kstrtou32(buf, 10, &value);
	if (err)
		goto out;

	/* scale the 0..255 sysfs value to a percentage */
	value = (value * 100) / 255;

	if (is_support_sw_smu(adev))
		err = smu_set_fan_speed_percent(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	else
		err = -EINVAL;

out:
	pm_runtime_mark_last_busy(pmdev);
	pm_runtime_put_autosuspend(pmdev);

	return err ? err : count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
2474
	u32 speed = 0;
A
Alex Deucher 已提交
2475

2476
	if (amdgpu_in_reset(adev))
2477 2478
		return -EPERM;

2479
	err = pm_runtime_get_sync(adev->ddev->dev);
2480 2481
	if (err < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2482
		return err;
2483
	}
2484

2485
	if (is_support_sw_smu(adev))
2486
		err = smu_get_fan_speed_percent(&adev->smu, &speed);
2487
	else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
2488
		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
2489 2490 2491 2492 2493 2494 2495 2496
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;
A
Alex Deucher 已提交
2497 2498 2499 2500 2501 2502

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

2503 2504 2505 2506 2507 2508
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
2509
	u32 speed = 0;
2510

2511
	if (amdgpu_in_reset(adev))
2512 2513
		return -EPERM;

2514
	err = pm_runtime_get_sync(adev->ddev->dev);
2515 2516
	if (err < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2517
		return err;
2518
	}
2519

2520
	if (is_support_sw_smu(adev))
2521
		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
2522
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2523
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2524 2525 2526 2527 2528 2529 2530 2531
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;
2532 2533 2534 2535

	return sprintf(buf, "%i\n", speed);
}

2536 2537 2538 2539 2540 2541 2542 2543 2544
static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 min_rpm = 0;
	u32 size = sizeof(min_rpm);
	int r;

2545
	if (amdgpu_in_reset(adev))
2546 2547
		return -EPERM;

2548
	r = pm_runtime_get_sync(adev->ddev->dev);
2549 2550
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2551
		return r;
2552
	}
2553

2554 2555
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
				   (void *)&min_rpm, &size);
2556 2557 2558 2559

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 max_rpm = 0;
	u32 size = sizeof(max_rpm);
	int r;

2575
	if (amdgpu_in_reset(adev))
2576 2577
		return -EPERM;

2578
	r = pm_runtime_get_sync(adev->ddev->dev);
2579 2580
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2581
		return r;
2582
	}
2583

2584 2585
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
				   (void *)&max_rpm, &size);
2586 2587 2588 2589

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 rpm = 0;

2604
	if (amdgpu_in_reset(adev))
2605 2606
		return -EPERM;

2607
	err = pm_runtime_get_sync(adev->ddev->dev);
2608 2609
	if (err < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2610
		return err;
2611
	}
2612

2613
	if (is_support_sw_smu(adev))
2614
		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
2615
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2616
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2617 2618 2619 2620 2621 2622 2623 2624
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;
2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637

	return sprintf(buf, "%i\n", rpm);
}

/*
 * hwmon fan1_target store: set a fan speed target in RPM. Only valid
 * while manual fan control is active (-ENODATA otherwise).
 */
static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct device *pmdev = adev->ddev->dev;
	u32 value;
	u32 pwm_mode;
	int err;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(pmdev);
	if (err < 0) {
		pm_runtime_put_autosuspend(pmdev);
		return err;
	}

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		err = -ENODATA;
		goto out;
	}

	err = kstrtou32(buf, 10, &value);
	if (err)
		goto out;

	if (is_support_sw_smu(adev))
		err = smu_set_fan_speed_rpm(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
	else
		err = -EINVAL;

out:
	pm_runtime_mark_last_busy(pmdev);
	pm_runtime_put_autosuspend(pmdev);

	return err ? err : count;
}

static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
2687 2688
	int ret;

2689
	if (amdgpu_in_reset(adev))
2690 2691
		return -EPERM;

2692
	ret = pm_runtime_get_sync(adev->ddev->dev);
2693 2694
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2695
		return ret;
2696
	}
2697

2698 2699 2700
	if (is_support_sw_smu(adev)) {
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	} else {
2701 2702 2703
		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
2704
			return -EINVAL;
2705
		}
2706

2707 2708
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	}
2709 2710 2711 2712

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725
	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}

/* hwmon fan1_enable store: 0 selects automatic, 1 manual fan control. */
static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct device *pmdev = adev->ddev->dev;
	u32 pwm_mode;
	int value;
	int err;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 0:
		pwm_mode = AMD_FAN_CTRL_AUTO;
		break;
	case 1:
		pwm_mode = AMD_FAN_CTRL_MANUAL;
		break;
	default:
		return -EINVAL;
	}

	err = pm_runtime_get_sync(pmdev);
	if (err < 0) {
		pm_runtime_put_autosuspend(pmdev);
		return err;
	}
	err = 0;

	if (is_support_sw_smu(adev))
		smu_set_fan_control_mode(&adev->smu, pwm_mode);
	else if (adev->powerplay.pp_funcs->set_fan_control_mode)
		amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(pmdev);
	pm_runtime_put_autosuspend(pmdev);

	return err ? err : count;
}

2763 2764 2765 2766 2767 2768 2769 2770
static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddgfx;
	int r, size = sizeof(vddgfx);

2771
	if (amdgpu_in_reset(adev))
2772 2773
		return -EPERM;

2774
	r = pm_runtime_get_sync(adev->ddev->dev);
2775 2776
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2777
		return r;
2778
	}
2779 2780 2781 2782

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
				   (void *)&vddgfx, &size);
2783 2784 2785 2786

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
}

/* hwmon in0_label: fixed label for the GFX voltage channel. */
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddgfx\n");
}

static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddnb;
	int r, size = sizeof(vddnb);

2808
	if (amdgpu_in_reset(adev))
2809 2810
		return -EPERM;

2811
	/* only APUs have vddnb */
R
Rex Zhu 已提交
2812
	if  (!(adev->flags & AMD_IS_APU))
2813 2814
		return -EINVAL;

2815
	r = pm_runtime_get_sync(adev->ddev->dev);
2816 2817
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2818
		return r;
2819
	}
2820 2821 2822 2823

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
				   (void *)&vddnb, &size);
2824 2825 2826 2827

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
}

/* hwmon in1_label: fixed label for the northbridge voltage channel. */
static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddnb\n");
}

2841 2842 2843 2844 2845
static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
R
Rex Zhu 已提交
2846 2847
	u32 query = 0;
	int r, size = sizeof(u32);
2848 2849
	unsigned uw;

2850
	if (amdgpu_in_reset(adev))
2851 2852
		return -EPERM;

2853
	r = pm_runtime_get_sync(adev->ddev->dev);
2854 2855
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2856
		return r;
2857
	}
2858 2859 2860 2861

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
				   (void *)&query, &size);
2862 2863 2864 2865

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

2866 2867 2868 2869
	if (r)
		return r;

	/* convert to microwatts */
R
Rex Zhu 已提交
2870
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2871 2872 2873 2874

	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}

2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887
/* hwmon power1_cap_min: the minimum power cap is always 0. */
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t limit = 0;
2888 2889 2890
	ssize_t size;
	int r;

2891
	if (amdgpu_in_reset(adev))
2892 2893
		return -EPERM;

2894
	r = pm_runtime_get_sync(adev->ddev->dev);
2895 2896
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2897
		return r;
2898
	}
2899

2900
	if (is_support_sw_smu(adev)) {
2901
		smu_get_power_limit(&adev->smu, &limit, true);
2902
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2903
	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2904
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
2905
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2906
	} else {
2907
		size = snprintf(buf, PAGE_SIZE, "\n");
2908
	}
2909 2910 2911 2912 2913

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return size;
2914 2915 2916 2917 2918 2919 2920 2921
}

static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t limit = 0;
2922 2923 2924
	ssize_t size;
	int r;

2925
	if (amdgpu_in_reset(adev))
2926 2927
		return -EPERM;

2928
	r = pm_runtime_get_sync(adev->ddev->dev);
2929 2930
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
2931
		return r;
2932
	}
2933

2934
	if (is_support_sw_smu(adev)) {
2935
		smu_get_power_limit(&adev->smu, &limit, false);
2936
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2937
	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2938
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
2939
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2940
	} else {
2941
		size = snprintf(buf, PAGE_SIZE, "\n");
2942
	}
2943 2944 2945 2946 2947

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return size;
2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959
}


/* hwmon power1_cap store: program a new power limit (input microwatts). */
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct device *pmdev = adev->ddev->dev;
	u32 value;
	int err;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* power capping is not available under SR-IOV */
	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = value / 1000000; /* convert microwatts to watts */

	err = pm_runtime_get_sync(pmdev);
	if (err < 0) {
		pm_runtime_put_autosuspend(pmdev);
		return err;
	}

	if (is_support_sw_smu(adev))
		err = smu_set_power_limit(&adev->smu, value);
	else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(pmdev);
	pm_runtime_put_autosuspend(pmdev);

	return err ? err : count;
}

2995 2996 2997 2998 2999 3000 3001 3002
static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t sclk;
	int r, size = sizeof(sclk);

3003
	if (amdgpu_in_reset(adev))
3004 3005
		return -EPERM;

3006
	r = pm_runtime_get_sync(adev->ddev->dev);
3007 3008
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
3009
		return r;
3010
	}
3011 3012 3013 3014

	/* get the sclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				   (void *)&sclk, &size);
3015 3016 3017 3018

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

3019 3020 3021
	if (r)
		return r;

3022
	return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039
}

/* sysfs show handler for freq1_label: fixed channel name for the
 * gfx/compute clock exposed via freq1_input. */
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	ssize_t len;

	len = snprintf(buf, PAGE_SIZE, "sclk\n");
	return len;
}

static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t mclk;
	int r, size = sizeof(mclk);

3040
	if (amdgpu_in_reset(adev))
3041 3042
		return -EPERM;

3043
	r = pm_runtime_get_sync(adev->ddev->dev);
3044 3045
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
3046
		return r;
3047
	}
3048 3049 3050 3051

	/* get the sclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
				   (void *)&mclk, &size);
3052 3053 3054 3055

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

3056 3057 3058
	if (r)
		return r;

3059
	return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
3060 3061 3062 3063 3064 3065 3066 3067
}

/* sysfs show handler for freq2_label: fixed channel name for the
 * memory clock exposed via freq2_input. */
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	ssize_t len;

	len = snprintf(buf, PAGE_SIZE, "mclk\n");
	return len;
}
3068 3069 3070 3071 3072

/**
 * DOC: hwmon
 *
 * The amdgpu driver exposes the following sensor interfaces:
 *
 * - GPU temperature (via the on-die sensor)
 *
 * - GPU voltage
 *
 * - Northbridge voltage (APUs only)
 *
 * - GPU power
 *
 * - GPU fan
 *
 * - GPU gfx/compute engine clock
 *
 * - GPU memory clock (dGPU only)
 *
 * hwmon interfaces for GPU temperature:
 *
 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_label: temperature channel label
 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius
 *   - these are supported on SOC15 dGPUs only
 *
 * hwmon interfaces for GPU voltage:
 *
 * - in0_input: the voltage on the GPU in millivolts
 *
 * - in1_input: the voltage on the Northbridge in millivolts
 *
 * hwmon interfaces for GPU power:
 *
 * - power1_average: average power used by the GPU in microWatts
 *
 * - power1_cap_min: minimum cap supported in microWatts
 *
 * - power1_cap_max: maximum cap supported in microWatts
 *
 * - power1_cap: selected power cap in microWatts
 *
 * hwmon interfaces for GPU fan:
 *
 * - pwm1: pulse width modulation fan level (0-255)
 *
 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
 *
 * - pwm1_min: pulse width modulation fan control minimum level (0)
 *
 * - pwm1_max: pulse width modulation fan control maximum level (255)
 *
 * - fan1_min: a minimum value Unit: revolution/min (RPM)
 *
 * - fan1_max: a maximum value Unit: revolution/max (RPM)
 *
 * - fan1_input: fan speed in RPM
 *
 * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM)
 *
 * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable
 *
 * hwmon interfaces for GPU clocks:
 *
 * - freq1_input: the gfx/compute clock in hertz
 *
 * - freq2_input: the memory clock in hertz
 *
 * You can use hwmon tools like sensors to view this information on your system.
 *
 */

/* hwmon sensor attribute declarations.  The trailing index selects the
 * temperature channel (PP_TEMP_EDGE/JUNCTION/MEM) or, for the threshold
 * handlers, whether the critical value (0) or its hysteresis (1) is shown. */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3192
	&sensor_dev_attr_temp2_input.dev_attr.attr,
3193 3194
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3195
	&sensor_dev_attr_temp3_input.dev_attr.attr,
3196 3197
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3198 3199 3200
	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
3201 3202 3203
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&sensor_dev_attr_temp2_label.dev_attr.attr,
	&sensor_dev_attr_temp3_label.dev_attr.attr,
A
Alex Deucher 已提交
3204 3205 3206 3207
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
3208
	&sensor_dev_attr_fan1_input.dev_attr.attr,
3209 3210 3211 3212
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
3213 3214 3215 3216
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
3217
	&sensor_dev_attr_power1_average.dev_attr.attr,
3218 3219 3220
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
3221 3222 3223 3224
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
A
Alex Deucher 已提交
3225 3226 3227 3228 3229 3230
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
G
Geliang Tang 已提交
3231
	struct device *dev = kobj_to_dev(kobj);
A
Alex Deucher 已提交
3232 3233 3234
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251
	/* under multi-vf mode, the hwmon attributes are all not supported */
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	/* there is no fan under pp one vf mode */
	if (amdgpu_sriov_is_pp_one_vf(adev) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

3252 3253 3254 3255 3256
	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3257 3258 3259 3260 3261
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3262
		return 0;
3263

3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276
	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

3277 3278 3279 3280 3281 3282
	/* Skip crit temp on APU */
	if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
		return 0;

3283
	/* Skip limit attributes if DPM is not enabled */
A
Alex Deucher 已提交
3284 3285
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3286 3287 3288 3289
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3290 3291 3292 3293 3294 3295
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
A
Alex Deucher 已提交
3296 3297
		return 0;

3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311
	if (!is_support_sw_smu(adev)) {
		/* mask fan attributes if we have no bindings for this asic to expose */
		if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
		    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
			effective_mode &= ~S_IRUGO;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
		    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
			effective_mode &= ~S_IWUSR;
	}
A
Alex Deucher 已提交
3312

3313
	if (((adev->flags & AMD_IS_APU) ||
3314
	     adev->family == AMDGPU_FAMILY_SI) &&	/* not implemented yet */
3315
	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3316 3317 3318 3319
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

3320 3321 3322 3323 3324 3325
	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) &&
	      (adev->asic_type < CHIP_RENOIR))) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
		return 0;

3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341
	if (!is_support_sw_smu(adev)) {
		/* hide max/min values if we can't both query and manage the fan */
		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
		     (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
			return 0;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
			return 0;
	}
3342

3343 3344 3345 3346 3347 3348
	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
		return 0;

3349 3350 3351 3352
	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3353 3354
		return 0;

3355 3356 3357 3358 3359 3360
	/* no mclk on APUs */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

3361 3362 3363 3364 3365 3366
	/* only SOC15 dGPUs support hotspot and mem temperatures */
	if (((adev->flags & AMD_IS_APU) ||
	     adev->asic_type < CHIP_VEGA10) &&
	    (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3367 3368 3369
	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3370 3371
	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3372 3373 3374
	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3375 3376
		return 0;

A
Alex Deucher 已提交
3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392
	return effective_mode;
}

/* Attribute group backing the hwmon device; per-ASIC visibility of each
 * entry is resolved at registration time by hwmon_attributes_visible(). */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

/* NULL-terminated group list passed to hwmon_device_register_with_groups(). */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;
3393
	uint32_t mask = 0;
A
Alex Deucher 已提交
3394

3395 3396 3397
	if (adev->pm.sysfs_initialized)
		return 0;

3398 3399 3400
	if (adev->pm.dpm_enabled == 0)
		return 0;

3401 3402
	INIT_LIST_HEAD(&adev->pm.pm_attr_list);

A
Alex Deucher 已提交
3403 3404 3405 3406 3407 3408 3409 3410 3411 3412
	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423
	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
	case SRIOV_VF_MODE_ONE_VF:
		mask = ATTR_FLAG_ONEVF;
		break;
	case SRIOV_VF_MODE_MULTI_VF:
		mask = 0;
		break;
	case SRIOV_VF_MODE_BARE_METAL:
	default:
		mask = ATTR_FLAG_MASK_ALL;
		break;
3424 3425
	}

3426 3427 3428
	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),
3429 3430
					       mask,
					       &adev->pm.pm_attr_list);
3431
	if (ret)
3432
		return ret;
3433

3434 3435
	adev->pm.sysfs_initialized = true;

A
Alex Deucher 已提交
3436 3437 3438 3439 3440
	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
3441 3442 3443
	if (adev->pm.dpm_enabled == 0)
		return;

A
Alex Deucher 已提交
3444 3445
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
3446

3447
	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
A
Alex Deucher 已提交
3448 3449 3450 3451 3452 3453 3454
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

3455 3456
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
3457
	uint32_t value;
3458
	uint64_t value64;
R
Rex Zhu 已提交
3459
	uint32_t query = 0;
3460
	int size;
3461 3462

	/* GPU Clocks */
3463
	size = sizeof(value);
3464
	seq_printf(m, "GFX Clocks and Power:\n");
3465
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3466
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3467
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3468
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3469 3470 3471 3472
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3473
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3474
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3475
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3476
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
R
Rex Zhu 已提交
3477 3478 3479
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3480
	size = sizeof(value);
3481 3482 3483
	seq_printf(m, "\n");

	/* GPU Temp */
3484
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3485 3486 3487
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
3488
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3489
		seq_printf(m, "GPU Load: %u %%\n", value);
3490 3491 3492 3493
	/* MEM Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
		seq_printf(m, "MEM Load: %u %%\n", value);

3494 3495
	seq_printf(m, "\n");

3496 3497 3498 3499
	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511
	if (adev->asic_type > CHIP_VEGA20) {
		/* VCN clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Disabled\n");
			} else {
				seq_printf(m, "VCN: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
3512
		}
3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527
		seq_printf(m, "\n");
	} else {
		/* UVD clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Disabled\n");
			} else {
				seq_printf(m, "UVD: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
3528

3529 3530 3531 3532 3533 3534 3535 3536 3537
		/* VCE clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Disabled\n");
			} else {
				seq_printf(m, "VCE: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
			}
3538 3539 3540 3541 3542 3543
		}
	}

	return 0;
}

3544 3545 3546 3547 3548 3549 3550 3551 3552
static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}

A
Alex Deucher 已提交
3553 3554 3555 3556 3557
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
3558
	u32 flags = 0;
3559 3560
	int r;

3561
	if (amdgpu_in_reset(adev))
3562 3563
		return -EPERM;

3564
	r = pm_runtime_get_sync(dev->dev);
3565 3566
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
3567
		return r;
3568
	}
3569

3570 3571
	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
3572 3573
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
3574 3575
		return 0;
	}
3576 3577 3578

	if (!is_support_sw_smu(adev) &&
	    adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
A
Alex Deucher 已提交
3579
		mutex_lock(&adev->pm.mutex);
3580 3581
		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
A
Alex Deucher 已提交
3582 3583 3584
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
3585
		r = 0;
R
Rex Zhu 已提交
3586
	} else {
3587
		r = amdgpu_debugfs_pm_info_pp(m, adev);
A
Alex Deucher 已提交
3588
	}
3589 3590 3591 3592 3593 3594 3595 3596
	if (r)
		goto out;

	amdgpu_device_ip_get_clockgating_state(adev, &flags);

	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");
A
Alex Deucher 已提交
3597

3598
out:
3599 3600 3601 3602
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
A
Alex Deucher 已提交
3603 3604
}

3605
static const struct drm_info_list amdgpu_pm_info_list[] = {
A
Alex Deucher 已提交
3606 3607 3608 3609
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

/*
 * amdgpu_debugfs_pm_init - register the amdgpu_pm_info debugfs file
 *
 * Returns 0 on success (or unconditionally when CONFIG_DEBUG_FS is
 * disabled), otherwise the error from amdgpu_debugfs_add_files().
 */
int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}