amd_powerplay.c 30.9 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23
#include "pp_debug.h"
24 25 26
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
27
#include <linux/slab.h>
28 29
#include "amd_shared.h"
#include "amd_powerplay.h"
30
#include "power_state.h"
31
#include "amdgpu.h"
R
Rex Zhu 已提交
32
#include "hwmgr.h"
33

34 35
#define PP_DPM_DISABLED 0xCCCC

R
Rex Zhu 已提交
36
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
37
		enum amd_pm_state_type *user_state);
R
Rex Zhu 已提交
38

39
static const struct amd_pm_funcs pp_dpm_funcs;
40

41 42 43
static inline int pp_check(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
44 45
		return -EINVAL;

46
	if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
47
		return PP_DPM_DISABLED;
48

49 50
	return 0;
}
51

52
/*
 * Allocate and initialize the pp_hwmgr instance for this device and
 * publish it (plus the dispatch table) in adev->powerplay.
 * Returns 0 on success, -EINVAL for a NULL device, -ENOMEM on allocation
 * failure.
 */
static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	/* DPM is disabled when the module parameter says so or under SR-IOV */
	hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = amdgpu_pp_feature_mask;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}

75

76
/*
 * Tear down the pp_hwmgr created by amd_powerplay_create().
 * Frees the user-supplied ("hardcoded") pp table, if any, then the
 * hwmgr itself.  Always returns 0.
 */
static int amd_powerplay_destroy(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;

	kfree(hwmgr);
	hwmgr = NULL;

	return 0;
}

89 90 91
/*
 * IP-block early_init hook: create the powerplay instance and run the
 * hwmgr early initialization.  Any hwmgr_early_init() failure is
 * collapsed to -EINVAL.
 */
static int pp_early_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = handle;

	ret = amd_powerplay_create(adev);

	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

106
/*
 * IP-block sw_init hook: run the SMU manager's software init.
 * pp_check() >= 0 covers both "fully enabled" (0) and "DPM disabled"
 * (PP_DPM_DISABLED) — smu_init still runs in the disabled case.
 */
static int pp_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->smu_init == NULL)
			return -EINVAL;

		ret = hwmgr->smumgr_funcs->smu_init(hwmgr);

		pr_debug("amdgpu: powerplay sw initialized\n");
	}

	return ret;
}
125

126 127
/*
 * IP-block sw_fini hook: undo pp_sw_init().  Also releases the SMU
 * firmware BO when firmware was loaded through the SMU path.
 */
static int pp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->smu_fini != NULL)
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}

/*
 * IP-block hw_init hook: start the SMC and bring up the hardware
 * manager.  Note the control flow: ret still holds the pp_check()
 * result after start_smu(), so PP_DPM_DISABLED skips hwmgr_hw_init()
 * and drops to the exit path, which marks DPM off and reports success.
 */
static int pp_hw_init(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);

	ret = pp_check(hwmgr);

	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->start_smu == NULL)
			return -EINVAL;

		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
			pr_err("smc start failed\n");
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
			return -EINVAL;
		}
		if (ret == PP_DPM_DISABLED)
			goto exit;
		ret = hwmgr_hw_init(hwmgr);
		if (ret)
			goto exit;
	}
	return ret;
exit:
	/* hw init failed or DPM intentionally off: run without DPM */
	hwmgr->pm_en = 0;
	cgs_notify_dpm_enabled(hwmgr->device, false);
	return 0;

}

/*
 * IP-block hw_fini hook: shut the hardware manager down, but only when
 * DPM was fully enabled (pp_check() == 0).  Always reports success.
 */
static int pp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret == 0)
		hwmgr_hw_fini(hwmgr);

	return 0;
}

R
Rex Zhu 已提交
191 192
/*
 * IP-block late_init hook: once everything is up, dispatch the
 * COMPLETE_INIT task so the power state machinery settles.
 */
static int pp_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret == 0)
		pp_dpm_dispatch_tasks(hwmgr,
					AMD_PP_TASK_COMPLETE_INIT, NULL);

	return 0;
}

206 207
/* IP-block late_fini hook: final teardown of the powerplay instance. */
static void pp_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amd_powerplay_destroy(adev);
}


214 215
/* Idle/reset hooks are not meaningful for powerplay; stubbed out. */
static bool pp_is_idle(void *handle)
{
	return false;
}

static int pp_wait_for_idle(void *handle)
{
	return 0;
}

static int pp_sw_reset(void *handle)
{
	return 0;
}

/*
 * IP-block powergating hook: forward GFX per-CU powergating requests to
 * the SMU.  Optional callback — missing implementation is logged and
 * treated as success.
 */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	/* Enable/disable GFX per cu powergating through SMU */
	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
			state == AMD_PG_STATE_GATE);
}

/*
 * IP-block suspend hook: quiesce the hardware manager when DPM is
 * fully enabled; otherwise a no-op.  Always reports success.
 */
static int pp_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret == 0)
		hwmgr_hw_suspend(hwmgr);
	return 0;
}

/*
 * IP-block resume hook: restart the SMC, then resume the hardware
 * manager.  When DPM is disabled (pp_check() == PP_DPM_DISABLED) the
 * SMC is still restarted but hwmgr_hw_resume() is skipped.
 */
static int pp_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret;

	ret = pp_check(hwmgr);

	if (ret < 0)
		return ret;

	if (hwmgr->smumgr_funcs->start_smu == NULL)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("smc start failed\n");
		hwmgr->smumgr_funcs->smu_fini(hwmgr);
		return -EINVAL;
	}

	/* ret still holds the pp_check() result here */
	if (ret == PP_DPM_DISABLED)
		return 0;

	return hwmgr_hw_resume(hwmgr);
}

289 290 291 292 293 294
/* Clockgating is handled elsewhere (pp_set_clockgating_by_smu); stub. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

295
/* amdgpu IP-block callback table for the powerplay (SMC) block. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

313 314 315 316 317 318 319 320 321
/* IP-block version descriptor registered with the amdgpu core. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

322 323 324 325 326 327 328 329 330 331
/* Firmware loading is driven by the amdgpu core on this path; stubs. */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

332 333
/*
 * Forward a clock-gating message ID to the SMU.  Optional callback —
 * a missing implementation is logged and treated as success.
 */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385
/*
 * Handle entry to / exit from a UMD (user-mode driver) pstate.
 * Entering a profiling level saves the current DPM level and ungates
 * GFX clock/power gating; leaving restores the saved level (on
 * PROFILE_EXIT) and re-enables gating.  *level may be rewritten.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			cgs_set_clockgating_state(hwmgr->device,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			cgs_set_clockgating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}

386 387 388
/*
 * Force a DPM performance level.  No-op when the requested level is
 * already active; otherwise handles any UMD-pstate transition and asks
 * the hwmgr to readjust the power state under smu_lock.
 */
static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (level == hwmgr->dpm_level)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
408

409 410 411
/*
 * Return the currently active DPM forced level.
 * NOTE(review): on failure the int pp_check() result is returned
 * through the enum return type (may be PP_DPM_DISABLED) — existing
 * convention in this file, callers must treat non-level values as error.
 */
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	enum amd_dpm_forced_level level;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	level = hwmgr->dpm_level;
	mutex_unlock(&hwmgr->smu_lock);
	return level;
}
426

427
/*
 * Query the shader clock (lowest level when @low, otherwise highest).
 * NOTE(review): error codes from pp_check() are returned through the
 * uint32_t return type — existing convention in this file.
 */
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	uint32_t clk = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}
447

448
/*
 * Query the memory clock (lowest level when @low, otherwise highest).
 * Same error-through-uint32_t convention as pp_dpm_get_sclk().
 */
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	uint32_t clk = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}
468

469
/* Gate/ungate the VCE block via the hwmgr callback, under smu_lock. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}
487

488
/* Gate/ungate the UVD block via the hwmgr callback, under smu_lock. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

507
/*
 * Dispatch a powerplay task to the hwmgr state machine under smu_lock.
 * @user_state optionally requests a specific power-state type.
 */
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;
	struct pp_hwmgr *hwmgr = handle;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}
524

525
/*
 * Map the current internal power state's UI label onto the generic
 * amd_pm_state_type enum.  Unlabeled boot states report INTERNAL_BOOT;
 * anything else unlabeled reports DEFAULT.
 */
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	int ret = 0;
	enum amd_pm_state_type pm_type;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->current_ps == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}
565

566
/* Set the fan control mode (manual/auto) via the hwmgr callback. */
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}

585
/*
 * Read back the current fan control mode.  Errors are returned through
 * the uint32_t return type (file-wide convention).
 */
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	uint32_t mode = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return mode;
}

/* Set fan speed as a percentage of maximum, under smu_lock. */
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Read the current fan speed (percent of maximum) into *speed. */
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

647 648
/*
 * Read the current fan speed in RPM into *rpm.  Unlike the percent
 * variant, a missing callback is reported as -EINVAL (hwmon needs a
 * hard failure here, not a silent zero).
 */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

666 667 668
/*
 * Fill @data with the number of power states and the generic type of
 * each.  The states array is walked by byte offset (hwmgr->ps_size per
 * entry) because pp_power_state is variable-sized.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;
	int ret = 0;

	memset(data, 0, sizeof(*data));

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->ps == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

/*
 * Hand out a pointer to the active soft pp table.  Returns the table
 * size in bytes on success (positive), or a negative/PP_DPM_DISABLED
 * error.  The caller must not free *table; it stays owned by hwmgr.
 */
static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	int size = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (!hwmgr->soft_pp_table)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	*table = (char *)hwmgr->soft_pp_table;
	size = hwmgr->soft_pp_table_size;
	mutex_unlock(&hwmgr->smu_lock);
	return size;
}

732 733
/*
 * Full powerplay hardware reset: tear down, re-init, then run the
 * COMPLETE_INIT task.  Used after the pp table is replaced.
 */
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

752 753
/*
 * Install a user-supplied pp table: copy it over a lazily-created
 * duplicate of the soft table, point soft_pp_table at it, then reset
 * the hardware so the new table takes effect and disable AVFS.
 * NOTE(review): @size is not validated against soft_pp_table_size here;
 * presumably the sysfs caller bounds it — confirm at the call site.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table) {
			mutex_unlock(&hwmgr->smu_lock);
			return -ENOMEM;
		}
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
	mutex_unlock(&hwmgr->smu_lock);

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Force specific clock levels (bitmask) for the given clock type.
 * Only honoured in manual DPM mode; otherwise returns -EINVAL.
 */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
	else
		ret = -EINVAL;
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Print available levels for a clock type into @buf (sysfs backend). */
static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

836 837
/* Get the sclk overdrive percentage via the hwmgr callback. */
static int pp_dpm_get_sclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Set the sclk overdrive percentage via the hwmgr callback. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

877 878
/* Get the mclk overdrive percentage via the hwmgr callback. */
static int pp_dpm_get_mclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Set the mclk overdrive percentage via the hwmgr callback. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

917 918
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
919
{
920
	struct pp_hwmgr *hwmgr = handle;
921
	int ret = 0;
922

923
	ret = pp_check(hwmgr);
924
	if (ret)
925
		return ret;
926

927 928 929 930 931 932 933 934 935
	if (value == NULL)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
936
		return 0;
937
	default:
938
		mutex_lock(&hwmgr->smu_lock);
939
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
940
		mutex_unlock(&hwmgr->smu_lock);
941
		return ret;
942 943 944
	}
}

945 946 947
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
948
	struct pp_hwmgr *hwmgr = handle;
949
	int ret = 0;
950

951
	ret = pp_check(hwmgr);
952

953
	if (ret)
954 955 956 957
		return NULL;

	if (hwmgr && idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
958 959 960
	return NULL;
}

961 962
/*
 * Print the supported power profile modes into @buf (sysfs backend).
 * Returns the number of bytes written, or -EINVAL on a bad handle.
 */
static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!buf || pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

/*
 * Select a power profile mode with optional custom parameters in
 * @input.  Only honoured in manual DPM mode; otherwise -EINVAL.
 */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -EINVAL;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

995 996
/* Edit an OverDriveN DPM table entry via the hwmgr callback. */
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

1010
/*
 * Enable/disable a workload power profile.  A priority bitmask tracks
 * which profiles are requested; the highest-priority set bit (via
 * fls()) selects the workload actually programmed.  The SMU is only
 * updated when not in manual DPM mode.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		/* mask may now be empty: fall back to slot 0 */
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

1049 1050 1051 1052 1053 1054 1055
/*
 * Tell the SMU where the CAC buffer lives (virtual + MC address and
 * size), via the notify_cac_buffer_info callback.
 */
static int pp_dpm_notify_smu_memory_info(void *handle,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);

	ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
					virtual_addr_hi, mc_addr_low, mc_addr_hi,
					size);

	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

1080 1081
/*
 * Set the power cap.  A limit of 0 restores the default; anything
 * above the default is rejected.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	if (limit > hwmgr->default_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Report the current (or default, when requested) power cap. */
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (limit == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (default_limit)
		*limit = hwmgr->default_power_limit;
	else
		*limit = hwmgr->power_limit;

	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

1133
/* Store a new display configuration from DAL, under smu_lock. */
static int pp_display_configuration_change(void *handle,
	const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	phm_store_dal_configuration_data(hwmgr, display_config);
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
1149

1150
/* Report the simple clock info DAL uses for display power levels. */
static int pp_get_display_power_level(void *handle,
		struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (output == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_dal_power_level(hwmgr, output);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
1169

1170
/*
 * Gather current engine/memory clock and bandwidth info for DAL.
 * Clock info comes from the current power state, queried with the
 * PowerContainment designation when that cap is enabled, Activity
 * otherwise; shallow-sleep clocks override the in-SR values when
 * available.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks;
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_info("Error in phm_get_clock_info \n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

1221
/* Fetch the clock table for @type into @clocks, under smu_lock. */
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (clocks == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

1240
static int pp_get_clock_by_type_with_latency(void *handle,
1241 1242 1243
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
1244
	struct pp_hwmgr *hwmgr = handle;
1245 1246
	int ret = 0;

1247
	ret = pp_check(hwmgr);
1248
	if (ret)
1249 1250 1251 1252 1253
		return ret;

	if (!clocks)
		return -EINVAL;

1254
	mutex_lock(&hwmgr->smu_lock);
1255
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1256
	mutex_unlock(&hwmgr->smu_lock);
1257 1258 1259
	return ret;
}

1260
static int pp_get_clock_by_type_with_voltage(void *handle,
1261 1262 1263
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
1264
	struct pp_hwmgr *hwmgr = handle;
1265 1266
	int ret = 0;

1267
	ret = pp_check(hwmgr);
1268
	if (ret)
1269 1270 1271 1272 1273
		return ret;

	if (!clocks)
		return -EINVAL;

1274
	mutex_lock(&hwmgr->smu_lock);
1275 1276 1277

	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

1278
	mutex_unlock(&hwmgr->smu_lock);
1279 1280 1281
	return ret;
}

1282
static int pp_set_watermarks_for_clocks_ranges(void *handle,
1283 1284
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
1285
	struct pp_hwmgr *hwmgr = handle;
1286 1287
	int ret = 0;

1288
	ret = pp_check(hwmgr);
1289
	if (ret)
1290 1291 1292 1293 1294
		return ret;

	if (!wm_with_clock_ranges)
		return -EINVAL;

1295
	mutex_lock(&hwmgr->smu_lock);
1296 1297
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
			wm_with_clock_ranges);
1298
	mutex_unlock(&hwmgr->smu_lock);
1299 1300 1301 1302

	return ret;
}

1303
static int pp_display_clock_voltage_request(void *handle,
1304 1305
		struct pp_display_clock_request *clock)
{
1306
	struct pp_hwmgr *hwmgr = handle;
1307 1308
	int ret = 0;

1309
	ret = pp_check(hwmgr);
1310
	if (ret)
1311 1312 1313 1314 1315
		return ret;

	if (!clock)
		return -EINVAL;

1316
	mutex_lock(&hwmgr->smu_lock);
1317
	ret = phm_display_clock_voltage_request(hwmgr, clock);
1318
	mutex_unlock(&hwmgr->smu_lock);
1319 1320 1321 1322

	return ret;
}

1323
static int pp_get_display_mode_validation_clocks(void *handle,
1324
		struct amd_pp_simple_clock_info *clocks)
1325
{
1326
	struct pp_hwmgr *hwmgr = handle;
1327
	int ret = 0;
1328

1329
	ret = pp_check(hwmgr);
1330

1331
	if (ret)
1332 1333 1334 1335
		return ret;

	if (clocks == NULL)
		return -EINVAL;
1336

1337
	mutex_lock(&hwmgr->smu_lock);
1338

1339
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1340
		ret = phm_get_max_high_clocks(hwmgr, clocks);
1341

1342
	mutex_unlock(&hwmgr->smu_lock);
1343
	return ret;
1344 1345
}

1346 1347
static int pp_set_mmhub_powergating_by_smu(void *handle)
{
1348
	struct pp_hwmgr *hwmgr = handle;
1349 1350
	int ret = 0;

1351
	ret = pp_check(hwmgr);
1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}

1364
/*
 * Dispatch table exported to amdgpu: routes the generic amd_pm_funcs
 * interface to the powerplay implementations in this file.  The entries
 * after the "export to DC" marker are consumed by the display core.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.powergate_vce = pp_dpm_powergate_vce,
	.powergate_uvd = pp_dpm_powergate_uvd,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
};