amd_powerplay.c 30.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23
#include "pp_debug.h"
24 25 26
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
27
#include <linux/slab.h>
28 29
#include "amd_shared.h"
#include "amd_powerplay.h"
30
#include "power_state.h"
31
#include "amdgpu.h"
R
Rex Zhu 已提交
32
#include "hwmgr.h"
33

34 35
#define PP_DPM_DISABLED 0xCCCC

R
Rex Zhu 已提交
36
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
37
		enum amd_pm_state_type *user_state);
R
Rex Zhu 已提交
38

39
static const struct amd_pm_funcs pp_dpm_funcs;
40

41 42 43
static inline int pp_check(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
44 45
		return -EINVAL;

46
	if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
47
		return PP_DPM_DISABLED;
48

49 50
	return 0;
}
51

52
static int amd_powerplay_create(struct amdgpu_device *adev)
53
{
54
	struct pp_hwmgr *hwmgr;
55

56
	if (adev == NULL)
57 58
		return -EINVAL;

59 60
	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
61 62
		return -ENOMEM;

63 64 65 66 67 68 69 70 71
	hwmgr->adev = adev;
	hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = amdgpu_pp_feature_mask;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
72 73 74
	return 0;
}

75

76
static int amd_powerplay_destroy(struct amdgpu_device *adev)
77
{
78
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
79

80 81
	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;
82

83 84
	kfree(hwmgr);
	hwmgr = NULL;
85 86 87 88

	return 0;
}

89 90 91
static int pp_early_init(void *handle)
{
	int ret;
92
	struct amdgpu_device *adev = handle;
93

94
	ret = amd_powerplay_create(adev);
95

96 97 98
	if (ret != 0)
		return ret;

99
	ret = hwmgr_early_init(adev->powerplay.pp_handle);
100
	if (ret)
101
		return -EINVAL;
102

103
	return 0;
104 105
}

106
static int pp_sw_init(void *handle)
107
{
108 109
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
110 111
	int ret = 0;

112
	ret = pp_check(hwmgr);
113

114
	if (ret >= 0) {
115
		if (hwmgr->smumgr_funcs->smu_init == NULL)
116
			return -EINVAL;
117

118
		ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
119

120
		pr_debug("amdgpu: powerplay sw initialized\n");
121
	}
122

123 124
	return ret;
}
125

126 127
static int pp_sw_fini(void *handle)
{
128 129
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
130 131
	int ret = 0;

132
	ret = pp_check(hwmgr);
133
	if (ret >= 0) {
134 135
		if (hwmgr->smumgr_funcs->smu_fini != NULL)
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
136
	}
137 138 139 140

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_fini_bo(adev);

141
	return 0;
142 143 144 145
}

static int pp_hw_init(void *handle)
{
146
	int ret = 0;
147 148
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
149

150 151
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);
152

153
	ret = pp_check(hwmgr);
154

155
	if (ret >= 0) {
156
		if (hwmgr->smumgr_funcs->start_smu == NULL)
157
			return -EINVAL;
158

159
		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
160
			pr_err("smc start failed\n");
161
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
162
			return -EINVAL;
163 164
		}
		if (ret == PP_DPM_DISABLED)
165
			goto exit;
166
		ret = hwmgr_hw_init(hwmgr);
167 168
		if (ret)
			goto exit;
169
	}
170 171
	return ret;
exit:
172
	hwmgr->pm_en = 0;
173 174 175
	cgs_notify_dpm_enabled(hwmgr->device, false);
	return 0;

176 177 178 179
}

static int pp_hw_fini(void *handle)
{
180 181
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
182
	int ret = 0;
183

184
	ret = pp_check(hwmgr);
185
	if (ret == 0)
186
		hwmgr_hw_fini(hwmgr);
187

188 189 190
	return 0;
}

R
Rex Zhu 已提交
191 192
static int pp_late_init(void *handle)
{
193 194
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
R
Rex Zhu 已提交
195 196
	int ret = 0;

197 198
	ret = pp_check(hwmgr);

R
Rex Zhu 已提交
199
	if (ret == 0)
200
		pp_dpm_dispatch_tasks(hwmgr,
201
					AMD_PP_TASK_COMPLETE_INIT, NULL);
R
Rex Zhu 已提交
202 203 204 205

	return 0;
}

206 207
/* IP callback: final teardown — release the powerplay instance itself. */
static void pp_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amd_powerplay_destroy(adev);
}


214 215
/* IP callback stub: powerplay never reports itself idle. */
static bool pp_is_idle(void *handle)
{
	return false;
}

/* IP callback stub: nothing to wait for. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}

/* IP callback stub: powerplay has no soft-reset procedure. */
static int pp_sw_reset(void *handle)
{
	return 0;
}

static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
232 233
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
234
	int ret = 0;
235

236
	ret = pp_check(hwmgr);
237

238
	if (ret)
239
		return ret;
240

241
	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
242
		pr_info("%s was not implemented.\n", __func__);
243 244
		return 0;
	}
245 246 247

	/* Enable/disable GFX per cu powergating through SMU */
	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
248
			state == AMD_PG_STATE_GATE);
249 250 251 252
}

static int pp_suspend(void *handle)
{
253 254
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
255
	int ret = 0;
256

257
	ret = pp_check(hwmgr);
258
	if (ret == 0)
259
		hwmgr_hw_suspend(hwmgr);
260
	return 0;
261 262 263 264
}

static int pp_resume(void *handle)
{
265 266
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
267
	int ret;
268

269
	ret = pp_check(hwmgr);
270

271 272
	if (ret < 0)
		return ret;
273

274
	if (hwmgr->smumgr_funcs->start_smu == NULL)
275 276
		return -EINVAL;

277
	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
278
		pr_err("smc start failed\n");
279
		hwmgr->smumgr_funcs->smu_fini(hwmgr);
280
		return -EINVAL;
281 282
	}

283
	if (ret == PP_DPM_DISABLED)
M
Monk Liu 已提交
284
		return 0;
285

286
	return hwmgr_hw_resume(hwmgr);
287 288
}

289
static const struct amd_ip_funcs pp_ip_funcs = {
290
	.name = "powerplay",
291
	.early_init = pp_early_init,
R
Rex Zhu 已提交
292
	.late_init = pp_late_init,
293 294 295 296
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
297
	.late_fini = pp_late_fini,
298 299 300 301 302
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
303
	.set_clockgating_state = NULL,
304 305 306
	.set_powergating_state = pp_set_powergating_state,
};

307 308 309 310 311 312 313 314 315
/* Version descriptor registering powerplay as the SMC IP block (v1.0.0). */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

316 317 318 319 320 321 322 323 324 325
/* amd_pm_funcs stub: firmware loading is driven elsewhere. */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

/* amd_pm_funcs stub: nothing to do once firmware loading completes. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

326 327
/* Forward a clockgating message to the SMU via the backend hook. */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379
/*
 * Track transitions in/out of the UMD "profile" pstates.
 *
 * On entry into any profile level the current dpm level is saved and GFX
 * clock/power gating is ungated (profiling needs stable clocks); on exit
 * the saved level is restored (when the caller asked for PROFILE_EXIT)
 * and gating is re-enabled. @level is in/out: it may be rewritten to the
 * previously saved level.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			cgs_set_clockgating_state(hwmgr->device,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			cgs_set_clockgating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}

380 381 382
static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
383
	struct pp_hwmgr *hwmgr = handle;
384
	int ret = 0;
385

386
	ret = pp_check(hwmgr);
387

388
	if (ret)
389
		return ret;
390

391 392 393
	if (level == hwmgr->dpm_level)
		return 0;

394
	mutex_lock(&hwmgr->smu_lock);
395 396
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
397 398
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);
399

400 401
	return 0;
}
402

403 404 405
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
406
	struct pp_hwmgr *hwmgr = handle;
407
	int ret = 0;
408
	enum amd_dpm_forced_level level;
409

410
	ret = pp_check(hwmgr);
411

412
	if (ret)
413
		return ret;
414

415
	mutex_lock(&hwmgr->smu_lock);
416
	level = hwmgr->dpm_level;
417
	mutex_unlock(&hwmgr->smu_lock);
418
	return level;
419
}
420

421
/* Query the engine clock from the backend; @low selects the low bound. */
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t clk;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);

	return clk;
}
441

442
/* Query the memory clock from the backend; @low selects the low bound. */
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t clk;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);

	return clk;
}
462

463
/* Gate/ungate the VCE block through the backend hook. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}
481

482
/* Gate/ungate the UVD block through the backend hook. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

501
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
502
		enum amd_pm_state_type *user_state)
503
{
504
	int ret = 0;
505
	struct pp_hwmgr *hwmgr = handle;
506

507
	ret = pp_check(hwmgr);
508

509
	if (ret)
510
		return ret;
511

512 513 514
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);
515

516
	return ret;
517
}
518

519
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
520
{
521
	struct pp_hwmgr *hwmgr = handle;
522
	struct pp_power_state *state;
523
	int ret = 0;
524
	enum amd_pm_state_type pm_type;
525

526
	ret = pp_check(hwmgr);
527

528
	if (ret)
529
		return ret;
530

531
	if (hwmgr->current_ps == NULL)
532 533
		return -EINVAL;

534
	mutex_lock(&hwmgr->smu_lock);
535

536 537 538 539
	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
540
		pm_type = POWER_STATE_TYPE_BATTERY;
541
		break;
542
	case PP_StateUILabel_Balanced:
543
		pm_type = POWER_STATE_TYPE_BALANCED;
544
		break;
545
	case PP_StateUILabel_Performance:
546
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
547
		break;
548
	default:
549
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
550
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
551
		else
552
			pm_type = POWER_STATE_TYPE_DEFAULT;
553
		break;
554
	}
555
	mutex_unlock(&hwmgr->smu_lock);
556 557

	return pm_type;
558
}
559

560
/* Set the fan control mode (manual/auto) through the backend hook. */
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}

579
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
580
{
581
	struct pp_hwmgr *hwmgr = handle;
582
	int ret = 0;
583
	uint32_t mode = 0;
584

585
	ret = pp_check(hwmgr);
586

587
	if (ret)
588
		return ret;
589

590
	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
591
		pr_info("%s was not implemented.\n", __func__);
592 593
		return 0;
	}
594
	mutex_lock(&hwmgr->smu_lock);
595
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
596
	mutex_unlock(&hwmgr->smu_lock);
597
	return mode;
598 599 600 601
}

/* Set fan speed as a percentage through the backend hook. */
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

/* Read fan speed as a percentage into *speed via the backend hook. */
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

641 642
/* Read fan speed in RPM; -EINVAL when the backend has no RPM readout. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

660 661 662
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
663
	struct pp_hwmgr *hwmgr = handle;
664
	int i;
665
	int ret = 0;
666

667 668
	memset(data, 0, sizeof(*data));

669
	ret = pp_check(hwmgr);
670

671
	if (ret)
672 673 674
		return ret;

	if (hwmgr->ps == NULL)
675 676
		return -EINVAL;

677
	mutex_lock(&hwmgr->smu_lock);
678

679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700
	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
701
	mutex_unlock(&hwmgr->smu_lock);
702 703 704 705 706
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
707
	struct pp_hwmgr *hwmgr = handle;
708
	int ret = 0;
709
	int size = 0;
710

711
	ret = pp_check(hwmgr);
712

713
	if (ret)
714
		return ret;
715

716 717 718
	if (!hwmgr->soft_pp_table)
		return -EINVAL;

719
	mutex_lock(&hwmgr->smu_lock);
720
	*table = (char *)hwmgr->soft_pp_table;
721
	size = hwmgr->soft_pp_table_size;
722
	mutex_unlock(&hwmgr->smu_lock);
723
	return size;
724 725
}

726 727
static int amd_powerplay_reset(void *handle)
{
728
	struct pp_hwmgr *hwmgr = handle;
729 730
	int ret;

731
	ret = pp_check(hwmgr);
732 733 734
	if (ret)
		return ret;

735
	ret = pp_hw_fini(hwmgr);
736 737 738
	if (ret)
		return ret;

739
	ret = hwmgr_hw_init(hwmgr);
740 741 742
	if (ret)
		return ret;

743
	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
744 745
}

746 747
/*
 * Install a user-supplied pp table: clone the soft table on first use,
 * overwrite it with @buf, then reset powerplay so the new table takes
 * effect and disable AVFS (it may conflict with hand-tuned tables).
 *
 * NOTE(review): @size is copied without being checked against
 * soft_pp_table_size — presumably callers guarantee it fits; verify.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table) {
			mutex_unlock(&hwmgr->smu_lock);
			return -ENOMEM;
		}
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);
	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
	mutex_unlock(&hwmgr->smu_lock);

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Force a subset of clock levels (sysfs pp_dpm_* interface). Only legal
 * while the dpm level is MANUAL; otherwise -EINVAL.
 */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
		hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask) :
		-EINVAL;
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
812
	struct pp_hwmgr *hwmgr = handle;
813
	int ret = 0;
814

815
	ret = pp_check(hwmgr);
816

817
	if (ret)
818
		return ret;
819

820
	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
821
		pr_info("%s was not implemented.\n", __func__);
822 823
		return 0;
	}
824
	mutex_lock(&hwmgr->smu_lock);
825
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
826
	mutex_unlock(&hwmgr->smu_lock);
827
	return ret;
828 829
}

830 831
static int pp_dpm_get_sclk_od(void *handle)
{
832
	struct pp_hwmgr *hwmgr = handle;
833
	int ret = 0;
834

835
	ret = pp_check(hwmgr);
836

837
	if (ret)
838
		return ret;
839 840

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
841
		pr_info("%s was not implemented.\n", __func__);
842 843
		return 0;
	}
844
	mutex_lock(&hwmgr->smu_lock);
845
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
846
	mutex_unlock(&hwmgr->smu_lock);
847
	return ret;
848 849 850 851
}

/* Apply an engine-clock overdrive value through the backend. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

871 872
static int pp_dpm_get_mclk_od(void *handle)
{
873
	struct pp_hwmgr *hwmgr = handle;
874
	int ret = 0;
875

876
	ret = pp_check(hwmgr);
877

878
	if (ret)
879
		return ret;
880 881

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
882
		pr_info("%s was not implemented.\n", __func__);
883 884
		return 0;
	}
885
	mutex_lock(&hwmgr->smu_lock);
886
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
887
	mutex_unlock(&hwmgr->smu_lock);
888
	return ret;
889 890 891 892
}

/* Apply a memory-clock overdrive value through the backend. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

911 912
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
913
{
914
	struct pp_hwmgr *hwmgr = handle;
915
	int ret = 0;
916

917
	ret = pp_check(hwmgr);
918
	if (ret)
919
		return ret;
920

921 922 923 924 925 926 927 928 929
	if (value == NULL)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
930
		return 0;
931
	default:
932
		mutex_lock(&hwmgr->smu_lock);
933
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
934
		mutex_unlock(&hwmgr->smu_lock);
935
		return ret;
936 937 938
	}
}

939 940 941
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
942
	struct pp_hwmgr *hwmgr = handle;
943
	int ret = 0;
944

945
	ret = pp_check(hwmgr);
946

947
	if (ret)
948 949 950 951
		return NULL;

	if (hwmgr && idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
952 953 954
	return NULL;
}

955 956
static int pp_get_power_profile_mode(void *handle, char *buf)
{
957
	struct pp_hwmgr *hwmgr = handle;
958

959
	if (!buf || pp_check(hwmgr))
960 961 962 963 964 965 966 967 968 969 970 971
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

/*
 * Program a power profile mode. Only permitted while the dpm level is
 * MANUAL; otherwise -EINVAL (the initial value of ret).
 */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -EINVAL;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

989 990
/* Edit an OD(N) dpm table entry through the backend hook. */
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

1004
/*
 * Enable/disable workload profile @type. The workload mask tracks all
 * requested profiles; the highest-priority set bit (fls) selects the
 * workload actually programmed, unless the user holds MANUAL control.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (en) {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
	} else {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
	}
	workload = hwmgr->workload_setting[index];

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

1043 1044 1045 1046 1047 1048 1049
/* Tell the SMU where the CAC buffer lives (GPU VA + MC address + size). */
static int pp_dpm_notify_smu_memory_info(void *handle,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
					virtual_addr_hi, mc_addr_low, mc_addr_hi,
					size);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

1074 1075
/*
 * Set the power cap. 0 means "restore the default"; values above the
 * default are rejected (this interface only lowers the limit).
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	if (limit > hwmgr->default_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

/* Report the current or default power cap into *limit. */
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (limit == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	*limit = default_limit ? hwmgr->default_power_limit :
				 hwmgr->power_limit;
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

1127
static int pp_display_configuration_change(void *handle,
1128
	const struct amd_pp_display_configuration *display_config)
1129
{
1130
	struct pp_hwmgr *hwmgr = handle;
1131
	int ret = 0;
1132

1133
	ret = pp_check(hwmgr);
1134

1135
	if (ret)
1136
		return ret;
1137

1138
	mutex_lock(&hwmgr->smu_lock);
1139
	phm_store_dal_configuration_data(hwmgr, display_config);
1140
	mutex_unlock(&hwmgr->smu_lock);
1141 1142
	return 0;
}
1143

1144
static int pp_get_display_power_level(void *handle,
R
Rex Zhu 已提交
1145
		struct amd_pp_simple_clock_info *output)
1146
{
1147
	struct pp_hwmgr *hwmgr = handle;
1148
	int ret = 0;
1149

1150
	ret = pp_check(hwmgr);
1151

1152
	if (ret)
1153
		return ret;
1154

1155 1156
	if (output == NULL)
		return -EINVAL;
1157

1158
	mutex_lock(&hwmgr->smu_lock);
1159
	ret = phm_get_dal_power_level(hwmgr, output);
1160
	mutex_unlock(&hwmgr->smu_lock);
1161
	return ret;
1162
}
1163

1164
static int pp_get_current_clocks(void *handle,
1165
		struct amd_pp_clock_info *clocks)
1166 1167 1168
{
	struct amd_pp_simple_clock_info simple_clocks;
	struct pp_clock_info hw_clocks;
1169
	struct pp_hwmgr *hwmgr = handle;
1170
	int ret = 0;
1171

1172
	ret = pp_check(hwmgr);
1173

1174
	if (ret)
1175
		return ret;
1176

1177
	mutex_lock(&hwmgr->smu_lock);
1178

1179 1180
	phm_get_dal_power_level(hwmgr, &simple_clocks);

1181 1182 1183 1184 1185 1186 1187 1188
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

1189
	if (ret) {
1190
		pr_info("Error in phm_get_clock_info \n");
1191
		mutex_unlock(&hwmgr->smu_lock);
1192
		return -EINVAL;
1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
1211
	mutex_unlock(&hwmgr->smu_lock);
1212 1213 1214
	return 0;
}

1215
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1216
{
1217
	struct pp_hwmgr *hwmgr = handle;
1218
	int ret = 0;
1219

1220
	ret = pp_check(hwmgr);
1221

1222
	if (ret)
1223 1224
		return ret;

1225
	if (clocks == NULL)
1226 1227
		return -EINVAL;

1228
	mutex_lock(&hwmgr->smu_lock);
1229
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
1230
	mutex_unlock(&hwmgr->smu_lock);
1231
	return ret;
1232 1233
}

1234
static int pp_get_clock_by_type_with_latency(void *handle,
1235 1236 1237
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
1238
	struct pp_hwmgr *hwmgr = handle;
1239 1240
	int ret = 0;

1241
	ret = pp_check(hwmgr);
1242
	if (ret)
1243 1244 1245 1246 1247
		return ret;

	if (!clocks)
		return -EINVAL;

1248
	mutex_lock(&hwmgr->smu_lock);
1249
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1250
	mutex_unlock(&hwmgr->smu_lock);
1251 1252 1253
	return ret;
}

1254
static int pp_get_clock_by_type_with_voltage(void *handle,
1255 1256 1257
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
1258
	struct pp_hwmgr *hwmgr = handle;
1259 1260
	int ret = 0;

1261
	ret = pp_check(hwmgr);
1262
	if (ret)
1263 1264 1265 1266 1267
		return ret;

	if (!clocks)
		return -EINVAL;

1268
	mutex_lock(&hwmgr->smu_lock);
1269 1270 1271

	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

1272
	mutex_unlock(&hwmgr->smu_lock);
1273 1274 1275
	return ret;
}

1276
static int pp_set_watermarks_for_clocks_ranges(void *handle,
1277 1278
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
1279
	struct pp_hwmgr *hwmgr = handle;
1280 1281
	int ret = 0;

1282
	ret = pp_check(hwmgr);
1283
	if (ret)
1284 1285 1286 1287 1288
		return ret;

	if (!wm_with_clock_ranges)
		return -EINVAL;

1289
	mutex_lock(&hwmgr->smu_lock);
1290 1291
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
			wm_with_clock_ranges);
1292
	mutex_unlock(&hwmgr->smu_lock);
1293 1294 1295 1296

	return ret;
}

1297
static int pp_display_clock_voltage_request(void *handle,
1298 1299
		struct pp_display_clock_request *clock)
{
1300
	struct pp_hwmgr *hwmgr = handle;
1301 1302
	int ret = 0;

1303
	ret = pp_check(hwmgr);
1304
	if (ret)
1305 1306 1307 1308 1309
		return ret;

	if (!clock)
		return -EINVAL;

1310
	mutex_lock(&hwmgr->smu_lock);
1311
	ret = phm_display_clock_voltage_request(hwmgr, clock);
1312
	mutex_unlock(&hwmgr->smu_lock);
1313 1314 1315 1316

	return ret;
}

1317
static int pp_get_display_mode_validation_clocks(void *handle,
1318
		struct amd_pp_simple_clock_info *clocks)
1319
{
1320
	struct pp_hwmgr *hwmgr = handle;
1321
	int ret = 0;
1322

1323
	ret = pp_check(hwmgr);
1324

1325
	if (ret)
1326 1327 1328 1329
		return ret;

	if (clocks == NULL)
		return -EINVAL;
1330

1331
	mutex_lock(&hwmgr->smu_lock);
1332

1333
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1334
		ret = phm_get_max_high_clocks(hwmgr, clocks);
1335

1336
	mutex_unlock(&hwmgr->smu_lock);
1337
	return ret;
1338 1339
}

1340 1341
static int pp_set_mmhub_powergating_by_smu(void *handle)
{
1342
	struct pp_hwmgr *hwmgr = handle;
1343 1344
	int ret = 0;

1345
	ret = pp_check(hwmgr);
1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}

1358
static const struct amd_pm_funcs pp_dpm_funcs = {
1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.powergate_vce = pp_dpm_powergate_vce,
	.powergate_uvd = pp_dpm_powergate_uvd,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
1385
	.notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
1386 1387
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
1388
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
1389 1390
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1403
	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
1404
};