amd_powerplay.c 31.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23
#include "pp_debug.h"
24 25 26
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
27
#include <linux/slab.h>
28 29
#include "amd_shared.h"
#include "amd_powerplay.h"
30
#include "power_state.h"
31
#include "amdgpu.h"
R
Rex Zhu 已提交
32
#include "hwmgr.h"
33

34 35
#define PP_DPM_DISABLED 0xCCCC

R
Rex Zhu 已提交
36
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
37
		enum amd_pm_state_type *user_state);
R
Rex Zhu 已提交
38

39
static const struct amd_pm_funcs pp_dpm_funcs;
40

41 42 43
static inline int pp_check(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
44 45
		return -EINVAL;

46
	if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
47
		return PP_DPM_DISABLED;
48

49 50
	return 0;
}
51

52
static int amd_powerplay_create(struct amdgpu_device *adev)
53
{
54
	struct pp_hwmgr *hwmgr;
55

56
	if (adev == NULL)
57 58
		return -EINVAL;

59 60
	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
61 62
		return -ENOMEM;

63 64 65 66 67 68 69 70 71
	hwmgr->adev = adev;
	hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = amdgpu_pp_feature_mask;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
72 73 74
	return 0;
}

75

76
static int amd_powerplay_destroy(struct amdgpu_device *adev)
77
{
78
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
79

80 81
	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;
82

83 84
	kfree(hwmgr);
	hwmgr = NULL;
85 86 87 88

	return 0;
}

89 90 91
static int pp_early_init(void *handle)
{
	int ret;
92
	struct amdgpu_device *adev = handle;
93

94
	ret = amd_powerplay_create(adev);
95

96 97 98
	if (ret != 0)
		return ret;

99
	ret = hwmgr_early_init(adev->powerplay.pp_handle);
100
	if (ret)
101
		return -EINVAL;
102

103
	return 0;
104 105
}

106
static int pp_sw_init(void *handle)
107
{
108 109
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
110 111
	int ret = 0;

112
	ret = pp_check(hwmgr);
113

114
	if (ret >= 0) {
115
		if (hwmgr->smumgr_funcs->smu_init == NULL)
116
			return -EINVAL;
117

118
		ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
119

120 121
		phm_register_irq_handlers(hwmgr);

122
		pr_debug("amdgpu: powerplay sw initialized\n");
123
	}
124

125 126
	return ret;
}
127

128 129
static int pp_sw_fini(void *handle)
{
130 131
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
132 133
	int ret = 0;

134
	ret = pp_check(hwmgr);
135
	if (ret >= 0) {
136 137
		if (hwmgr->smumgr_funcs->smu_fini != NULL)
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
138
	}
139 140 141 142

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_fini_bo(adev);

143
	return 0;
144 145 146 147
}

/*
 * IP callback: bring up the SMC and, when DPM is enabled, the hwmgr.
 * The SMC is started even when DPM is disabled; only the hwmgr
 * bring-up is skipped in that case.
 */
static int pp_hw_init(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	/* the SMU firmware buffer must exist before the SMC is started */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);

	ret = pp_check(hwmgr);

	/* ret >= 0 covers both "fully enabled" (0) and PP_DPM_DISABLED */
	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->start_smu == NULL)
			return -EINVAL;

		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
			pr_err("smc start failed\n");
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
			return -EINVAL;
		}
		/* SMC is up but DPM is off: skip hwmgr bring-up */
		if (ret == PP_DPM_DISABLED)
			goto exit;
		ret = hwmgr_hw_init(hwmgr);
		if (ret)
			goto exit;
	}
	return ret;
exit:
	/* fall back to running without dynamic power management */
	hwmgr->pm_en = 0;
	cgs_notify_dpm_enabled(hwmgr->device, false);
	return 0;

}

static int pp_hw_fini(void *handle)
{
182 183
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
184
	int ret = 0;
185

186
	ret = pp_check(hwmgr);
187
	if (ret == 0)
188
		hwmgr_hw_fini(hwmgr);
189

190 191 192
	return 0;
}

R
Rex Zhu 已提交
193 194
static int pp_late_init(void *handle)
{
195 196
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
R
Rex Zhu 已提交
197 198
	int ret = 0;

199 200
	ret = pp_check(hwmgr);

R
Rex Zhu 已提交
201
	if (ret == 0)
202
		pp_dpm_dispatch_tasks(hwmgr,
203
					AMD_PP_TASK_COMPLETE_INIT, NULL);
R
Rex Zhu 已提交
204 205 206 207

	return 0;
}

208 209
/* IP callback: final teardown of the whole powerplay instance. */
static void pp_late_fini(void *handle)
{
	amd_powerplay_destroy((struct amdgpu_device *)handle);
}


216 217
/* IP callback: powerplay exposes no idle notion; always report busy. */
static bool pp_is_idle(void *handle)
{
	return false;
}

/* IP callback: nothing to wait for; always succeeds. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}

/* IP callback: soft reset is a no-op for the powerplay block. */
static int pp_sw_reset(void *handle)
{
	return 0;
}

static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
234 235
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
236
	int ret = 0;
237

238
	ret = pp_check(hwmgr);
239

240
	if (ret)
241
		return ret;
242

243
	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
244
		pr_info("%s was not implemented.\n", __func__);
245 246
		return 0;
	}
247 248 249

	/* Enable/disable GFX per cu powergating through SMU */
	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
250
			state == AMD_PG_STATE_GATE);
251 252 253 254
}

static int pp_suspend(void *handle)
{
255 256
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
257
	int ret = 0;
258

259
	ret = pp_check(hwmgr);
260
	if (ret == 0)
261
		hwmgr_hw_suspend(hwmgr);
262
	return 0;
263 264 265 266
}

/*
 * IP callback: restart the SMC after resume and, when DPM is enabled,
 * restore the hwmgr state.  As with pp_hw_init(), the SMC restart
 * happens even when DPM itself is disabled.
 */
static int pp_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret;

	ret = pp_check(hwmgr);

	/* only hard errors bail; PP_DPM_DISABLED still restarts the SMC */
	if (ret < 0)
		return ret;

	if (hwmgr->smumgr_funcs->start_smu == NULL)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("smc start failed\n");
		hwmgr->smumgr_funcs->smu_fini(hwmgr);
		return -EINVAL;
	}

	/* DPM disabled: the SMC restart above is all that is needed */
	if (ret == PP_DPM_DISABLED)
		return 0;

	return hwmgr_hw_resume(hwmgr);
}

291 292 293 294 295 296
/* IP callback: no-op; accepted only to satisfy the amd_ip_funcs contract. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

297
/* amdgpu IP-level entry points for the powerplay (SMC) block */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

315 316 317 318 319 320 321 322 323
/* IP-block registration record for the powerplay SMC block (v1.0) */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

324 325 326 327 328 329 330 331 332 333
/* amd_pm_funcs hook: no-op stub; reports firmware load success. */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

/* amd_pm_funcs hook: no-op stub; nothing to wait on after fw load. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

334 335
/* Forward a clockgating message id to the hwmgr backend. */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->update_clock_gatings) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387
/*
 * Handle entry to / exit from a UMD stable-pstate profile level.
 * Entering saves the current level and ungates GFX clock/power gating;
 * exiting restores the saved level and re-enables gating.  @level may
 * be rewritten (PROFILE_EXIT is replaced by the saved level).
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			/* ungate CG before PG when entering the profile */
			cgs_set_clockgating_state(hwmgr->device,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			cgs_set_clockgating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}

388 389 390
static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
391
	struct pp_hwmgr *hwmgr = handle;
392
	int ret = 0;
393

394
	ret = pp_check(hwmgr);
395

396
	if (ret)
397
		return ret;
398

399 400 401
	if (level == hwmgr->dpm_level)
		return 0;

402
	mutex_lock(&hwmgr->smu_lock);
403 404
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
405 406
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);
407

408 409
	return 0;
}
410

411 412 413
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
414
	struct pp_hwmgr *hwmgr = handle;
415
	int ret = 0;
416
	enum amd_dpm_forced_level level;
417

418
	ret = pp_check(hwmgr);
419

420
	if (ret)
421
		return ret;
422

423
	mutex_lock(&hwmgr->smu_lock);
424
	level = hwmgr->dpm_level;
425
	mutex_unlock(&hwmgr->smu_lock);
426
	return level;
427
}
428

429
/* Query the engine clock from the hwmgr backend; @low selects the range end. */
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t freq;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->get_sclk) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	freq = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return freq;
}
449

450
/* Query the memory clock from the hwmgr backend; @low selects the range end. */
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t freq;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->get_mclk) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	freq = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return freq;
}
470

471
/* Gate or ungate the VCE block through the hwmgr backend. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return;

	if (!hwmgr->hwmgr_func->powergate_vce) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}
489

490
/* Gate or ungate the UVD block through the hwmgr backend. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return;

	if (!hwmgr->hwmgr_func->powergate_uvd) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

509
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
510
		enum amd_pm_state_type *user_state)
511
{
512
	int ret = 0;
513
	struct pp_hwmgr *hwmgr = handle;
514

515
	ret = pp_check(hwmgr);
516

517
	if (ret)
518
		return ret;
519

520 521 522
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);
523

524
	return ret;
525
}
526

527
/*
 * Map the current power state's UI classification onto the generic
 * amd_pm_state_type values.  Returns -EINVAL when no current state
 * exists, or the pp_check() error when DPM is unavailable.
 */
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	int ret = 0;
	enum amd_pm_state_type pm_type;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->current_ps == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		/* unlabeled states: distinguish the boot state from the rest */
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}
567

568
/* Select the fan control mode via the hwmgr backend. */
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return;

	if (!hwmgr->hwmgr_func->set_fan_control_mode) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}

587
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
588
{
589
	struct pp_hwmgr *hwmgr = handle;
590
	int ret = 0;
591
	uint32_t mode = 0;
592

593
	ret = pp_check(hwmgr);
594

595
	if (ret)
596
		return ret;
597

598
	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
599
		pr_info("%s was not implemented.\n", __func__);
600 601
		return 0;
	}
602
	mutex_lock(&hwmgr->smu_lock);
603
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
604
	mutex_unlock(&hwmgr->smu_lock);
605
	return mode;
606 607 608 609
}

/* Set the fan speed as a percentage via the hwmgr backend. */
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->set_fan_speed_percent) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);
	return err;
}

/* Read the fan speed as a percentage from the hwmgr backend. */
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->get_fan_speed_percent) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);
	return err;
}

649 650
/* Read the fan speed in RPM from the hwmgr backend. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->get_fan_speed_rpm)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);
	return err;
}

668 669 670
/*
 * Fill @data with the number of power states and the generic type of
 * each, derived from its UI classification.  Returns -EINVAL when the
 * hwmgr has not built a state array yet.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;
	int ret = 0;

	memset(data, 0, sizeof(*data));

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->ps == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		/* states are stored as a packed array of ps_size-byte records */
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			/* unlabeled: distinguish the boot state from the rest */
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
715
	struct pp_hwmgr *hwmgr = handle;
716
	int ret = 0;
717
	int size = 0;
718

719
	ret = pp_check(hwmgr);
720

721
	if (ret)
722
		return ret;
723

724 725 726
	if (!hwmgr->soft_pp_table)
		return -EINVAL;

727
	mutex_lock(&hwmgr->smu_lock);
728
	*table = (char *)hwmgr->soft_pp_table;
729
	size = hwmgr->soft_pp_table_size;
730
	mutex_unlock(&hwmgr->smu_lock);
731
	return size;
732 733
}

734 735
static int amd_powerplay_reset(void *handle)
{
736
	struct pp_hwmgr *hwmgr = handle;
737 738
	int ret;

739
	ret = pp_check(hwmgr);
740 741 742
	if (ret)
		return ret;

743
	ret = hwmgr_hw_fini(hwmgr);
744 745 746
	if (ret)
		return ret;

747
	ret = hwmgr_hw_init(hwmgr);
748 749 750
	if (ret)
		return ret;

751
	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
752 753
}

754 755
/*
 * Install a caller-supplied pp table override and restart the hwmgr so
 * it takes effect.  The override is copied into a kmemdup()'d clone of
 * the original soft table, so @size must not exceed that table's size
 * — the original code copied an unchecked caller-controlled length
 * into the fixed-size buffer, overflowing it for oversized input.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	/* reject a copy that would overrun the backing table */
	if (buf == NULL || size > hwmgr->soft_pp_table_size)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table) {
			mutex_unlock(&hwmgr->smu_lock);
			return -ENOMEM;
		}
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
	mutex_unlock(&hwmgr->smu_lock);

	/* restart the hwmgr so the new table is parsed and applied */
	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			return ret;
	}

	return 0;
}

/* Force specific clock levels; only legal while in manual DPM mode. */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->force_clock_level) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	err = (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
		hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask) :
		-EINVAL;
	mutex_unlock(&hwmgr->smu_lock);
	return err;
}

static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
820
	struct pp_hwmgr *hwmgr = handle;
821
	int ret = 0;
822

823
	ret = pp_check(hwmgr);
824

825
	if (ret)
826
		return ret;
827

828
	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
829
		pr_info("%s was not implemented.\n", __func__);
830 831
		return 0;
	}
832
	mutex_lock(&hwmgr->smu_lock);
833
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
834
	mutex_unlock(&hwmgr->smu_lock);
835
	return ret;
836 837
}

838 839
static int pp_dpm_get_sclk_od(void *handle)
{
840
	struct pp_hwmgr *hwmgr = handle;
841
	int ret = 0;
842

843
	ret = pp_check(hwmgr);
844

845
	if (ret)
846
		return ret;
847 848

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
849
		pr_info("%s was not implemented.\n", __func__);
850 851
		return 0;
	}
852
	mutex_lock(&hwmgr->smu_lock);
853
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
854
	mutex_unlock(&hwmgr->smu_lock);
855
	return ret;
856 857 858 859
}

/* Apply an engine-clock overdrive value via the hwmgr backend. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->set_sclk_od) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return err;
}

879 880
static int pp_dpm_get_mclk_od(void *handle)
{
881
	struct pp_hwmgr *hwmgr = handle;
882
	int ret = 0;
883

884
	ret = pp_check(hwmgr);
885

886
	if (ret)
887
		return ret;
888 889

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
890
		pr_info("%s was not implemented.\n", __func__);
891 892
		return 0;
	}
893
	mutex_lock(&hwmgr->smu_lock);
894
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
895
	mutex_unlock(&hwmgr->smu_lock);
896
	return ret;
897 898 899 900
}

/* Apply a memory-clock overdrive value via the hwmgr backend. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->set_mclk_od) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return err;
}

919 920
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
921
{
922
	struct pp_hwmgr *hwmgr = handle;
923
	int ret = 0;
924

925
	ret = pp_check(hwmgr);
926
	if (ret)
927
		return ret;
928

929 930 931 932 933 934 935 936 937
	if (value == NULL)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
938
		return 0;
939
	default:
940
		mutex_lock(&hwmgr->smu_lock);
941
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
942
		mutex_unlock(&hwmgr->smu_lock);
943
		return ret;
944 945 946
	}
}

947 948 949
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
950
	struct pp_hwmgr *hwmgr = handle;
951
	int ret = 0;
952

953
	ret = pp_check(hwmgr);
954

955
	if (ret)
956 957 958 959
		return NULL;

	if (hwmgr && idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
960 961 962
	return NULL;
}

963 964
static int pp_get_power_profile_mode(void *handle, char *buf)
{
965
	struct pp_hwmgr *hwmgr = handle;
966

967
	if (!buf || pp_check(hwmgr))
968 969 970 971 972 973 974 975 976 977 978 979
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

/* Apply a custom power-profile mode; only legal in manual DPM mode. */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int err = -EINVAL;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_power_profile_mode) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		err = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);
	return err;
}

997 998
/* Forward an overdrive DPM-table edit to the hwmgr backend. */
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (!hwmgr->hwmgr_func->odn_edit_dpm_table) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

1012
/*
 * Enable or disable one workload profile.  Active profiles are kept as
 * a priority bitmask; the highest-priority bit still set after the
 * update selects the workload setting that is actually applied (unless
 * the user has taken manual control of the DPM level).
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	/* the CUSTOM profile cannot be toggled through this interface */
	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		/* fls() returns 0 for an empty mask, hence the > 0 guard */
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	/* manual mode means the user owns the profile; don't override it */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

1051 1052 1053 1054 1055 1056 1057
/* Pass the CAC buffer location (GPU VA + MC address) down to the SMU. */
static int pp_dpm_notify_smu_memory_info(void *handle,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->notify_cac_buffer_info) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
					virtual_addr_hi, mc_addr_low, mc_addr_hi,
					size);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}

1082 1083
/* Set the power limit; 0 means "restore default", larger values are rejected. */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!hwmgr->hwmgr_func->set_power_limit) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	if (limit > hwmgr->default_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

/* Report the current or default power limit into *@limit. */
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	err = pp_check(hwmgr);
	if (err)
		return err;

	if (!limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	*limit = default_limit ? hwmgr->default_power_limit :
				 hwmgr->power_limit;
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

1135
static int pp_display_configuration_change(void *handle,
1136
	const struct amd_pp_display_configuration *display_config)
1137
{
1138
	struct pp_hwmgr *hwmgr = handle;
1139
	int ret = 0;
1140

1141
	ret = pp_check(hwmgr);
1142

1143
	if (ret)
1144
		return ret;
1145

1146
	mutex_lock(&hwmgr->smu_lock);
1147
	phm_store_dal_configuration_data(hwmgr, display_config);
1148
	mutex_unlock(&hwmgr->smu_lock);
1149 1150
	return 0;
}
1151

1152
static int pp_get_display_power_level(void *handle,
R
Rex Zhu 已提交
1153
		struct amd_pp_simple_clock_info *output)
1154
{
1155
	struct pp_hwmgr *hwmgr = handle;
1156
	int ret = 0;
1157

1158
	ret = pp_check(hwmgr);
1159

1160
	if (ret)
1161
		return ret;
1162

1163 1164
	if (output == NULL)
		return -EINVAL;
1165

1166
	mutex_lock(&hwmgr->smu_lock);
1167
	ret = phm_get_dal_power_level(hwmgr, output);
1168
	mutex_unlock(&hwmgr->smu_lock);
1169
	return ret;
1170
}
1171

1172
static int pp_get_current_clocks(void *handle,
1173
		struct amd_pp_clock_info *clocks)
1174 1175 1176
{
	struct amd_pp_simple_clock_info simple_clocks;
	struct pp_clock_info hw_clocks;
1177
	struct pp_hwmgr *hwmgr = handle;
1178
	int ret = 0;
1179

1180
	ret = pp_check(hwmgr);
1181

1182
	if (ret)
1183
		return ret;
1184

1185
	mutex_lock(&hwmgr->smu_lock);
1186

1187 1188
	phm_get_dal_power_level(hwmgr, &simple_clocks);

1189 1190 1191 1192 1193 1194 1195 1196
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

1197
	if (ret) {
1198
		pr_info("Error in phm_get_clock_info \n");
1199
		mutex_unlock(&hwmgr->smu_lock);
1200
		return -EINVAL;
1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
1219
	mutex_unlock(&hwmgr->smu_lock);
1220 1221 1222
	return 0;
}

1223
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1224
{
1225
	struct pp_hwmgr *hwmgr = handle;
1226
	int ret = 0;
1227

1228
	ret = pp_check(hwmgr);
1229

1230
	if (ret)
1231 1232
		return ret;

1233
	if (clocks == NULL)
1234 1235
		return -EINVAL;

1236
	mutex_lock(&hwmgr->smu_lock);
1237
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
1238
	mutex_unlock(&hwmgr->smu_lock);
1239
	return ret;
1240 1241
}

1242
static int pp_get_clock_by_type_with_latency(void *handle,
1243 1244 1245
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
1246
	struct pp_hwmgr *hwmgr = handle;
1247 1248
	int ret = 0;

1249
	ret = pp_check(hwmgr);
1250
	if (ret)
1251 1252 1253 1254 1255
		return ret;

	if (!clocks)
		return -EINVAL;

1256
	mutex_lock(&hwmgr->smu_lock);
1257
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1258
	mutex_unlock(&hwmgr->smu_lock);
1259 1260 1261
	return ret;
}

1262
static int pp_get_clock_by_type_with_voltage(void *handle,
1263 1264 1265
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
1266
	struct pp_hwmgr *hwmgr = handle;
1267 1268
	int ret = 0;

1269
	ret = pp_check(hwmgr);
1270
	if (ret)
1271 1272 1273 1274 1275
		return ret;

	if (!clocks)
		return -EINVAL;

1276
	mutex_lock(&hwmgr->smu_lock);
1277 1278 1279

	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

1280
	mutex_unlock(&hwmgr->smu_lock);
1281 1282 1283
	return ret;
}

1284
static int pp_set_watermarks_for_clocks_ranges(void *handle,
1285 1286
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
1287
	struct pp_hwmgr *hwmgr = handle;
1288 1289
	int ret = 0;

1290
	ret = pp_check(hwmgr);
1291
	if (ret)
1292 1293 1294 1295 1296
		return ret;

	if (!wm_with_clock_ranges)
		return -EINVAL;

1297
	mutex_lock(&hwmgr->smu_lock);
1298 1299
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
			wm_with_clock_ranges);
1300
	mutex_unlock(&hwmgr->smu_lock);
1301 1302 1303 1304

	return ret;
}

1305
static int pp_display_clock_voltage_request(void *handle,
1306 1307
		struct pp_display_clock_request *clock)
{
1308
	struct pp_hwmgr *hwmgr = handle;
1309 1310
	int ret = 0;

1311
	ret = pp_check(hwmgr);
1312
	if (ret)
1313 1314 1315 1316 1317
		return ret;

	if (!clock)
		return -EINVAL;

1318
	mutex_lock(&hwmgr->smu_lock);
1319
	ret = phm_display_clock_voltage_request(hwmgr, clock);
1320
	mutex_unlock(&hwmgr->smu_lock);
1321 1322 1323 1324

	return ret;
}

1325
static int pp_get_display_mode_validation_clocks(void *handle,
1326
		struct amd_pp_simple_clock_info *clocks)
1327
{
1328
	struct pp_hwmgr *hwmgr = handle;
1329
	int ret = 0;
1330

1331
	ret = pp_check(hwmgr);
1332

1333
	if (ret)
1334 1335 1336 1337
		return ret;

	if (clocks == NULL)
		return -EINVAL;
1338

1339
	mutex_lock(&hwmgr->smu_lock);
1340

1341
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1342
		ret = phm_get_max_high_clocks(hwmgr, clocks);
1343

1344
	mutex_unlock(&hwmgr->smu_lock);
1345
	return ret;
1346 1347
}

1348 1349
static int pp_set_mmhub_powergating_by_smu(void *handle)
{
1350
	struct pp_hwmgr *hwmgr = handle;
1351 1352
	int ret = 0;

1353
	ret = pp_check(hwmgr);
1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}

/* Dispatch table exported to amdgpu: maps the generic amd_pm_funcs
 * interface onto the powerplay implementations in this file. */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.powergate_vce = pp_dpm_powergate_vce,
	.powergate_uvd = pp_dpm_powergate_uvd,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
};