amd_powerplay.c 30.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23
#include "pp_debug.h"
24 25 26
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
27
#include <linux/slab.h>
28
#include <linux/firmware.h>
29 30
#include "amd_shared.h"
#include "amd_powerplay.h"
31
#include "power_state.h"
32
#include "amdgpu.h"
R
Rex Zhu 已提交
33
#include "hwmgr.h"
34

R
Rex Zhu 已提交
35

36
static const struct amd_pm_funcs pp_dpm_funcs;
37

38
static int amd_powerplay_create(struct amdgpu_device *adev)
39
{
40
	struct pp_hwmgr *hwmgr;
41

42
	if (adev == NULL)
43 44
		return -EINVAL;

45 46
	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
47 48
		return -ENOMEM;

49
	hwmgr->adev = adev;
50 51
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
52 53 54 55
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
56
	hwmgr->feature_mask = adev->powerplay.pp_feature;
57
	hwmgr->display_config = &adev->pm.pm_display_cfg;
58 59
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
60 61 62
	return 0;
}

63

64
static void amd_powerplay_destroy(struct amdgpu_device *adev)
65
{
66
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67

68 69
	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;
70

71 72
	kfree(hwmgr);
	hwmgr = NULL;
73 74
}

75 76 77
static int pp_early_init(void *handle)
{
	int ret;
78
	struct amdgpu_device *adev = handle;
79

80
	ret = amd_powerplay_create(adev);
81

82 83 84
	if (ret != 0)
		return ret;

85
	ret = hwmgr_early_init(adev->powerplay.pp_handle);
86
	if (ret)
87
		return -EINVAL;
88

89
	return 0;
90 91
}

92
static int pp_sw_init(void *handle)
93
{
94 95
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
96 97
	int ret = 0;

98
	ret = hwmgr_sw_init(hwmgr);
99

100
	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
101

102 103
	return ret;
}
104

105 106
static int pp_sw_fini(void *handle)
{
107 108
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
109

110
	hwmgr_sw_fini(hwmgr);
111

112 113 114
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
115
		amdgpu_ucode_fini_bo(adev);
116
	}
117

118
	return 0;
119 120 121 122
}

static int pp_hw_init(void *handle)
{
123
	int ret = 0;
124 125
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
126

127 128
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);
129

130
	ret = hwmgr_hw_init(hwmgr);
131

132 133
	if (ret)
		pr_err("powerplay hw init failed\n");
134

135
	return ret;
136 137 138 139
}

static int pp_hw_fini(void *handle)
{
140 141
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
142

143
	hwmgr_hw_fini(hwmgr);
144

145 146 147
	return 0;
}

148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178
/*
 * Reserve a kernel GTT buffer for the SMU's private use and hand both its
 * CPU and GPU addresses to the SMU via the backend's notify hook.  Best
 * effort: on any failure the buffer is released and only an error is logged.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	/* r stays -EINVAL if the backend has no notify_cac_buffer_info hook */
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* Split both addresses into 32-bit halves for the SMU interface. */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		/* Don't leak the BO if the SMU could not take the buffer. */
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}

R
Rex Zhu 已提交
179 180
static int pp_late_init(void *handle)
{
181 182 183
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

184 185 186
	if (hwmgr && hwmgr->pm_en) {
		mutex_lock(&hwmgr->smu_lock);
		hwmgr_handle_task(hwmgr,
187
					AMD_PP_TASK_COMPLETE_INIT, NULL);
188 189
		mutex_unlock(&hwmgr->smu_lock);
	}
190 191
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);
192

R
Rex Zhu 已提交
193 194 195
	return 0;
}

196 197
static void pp_late_fini(void *handle)
{
198 199
	struct amdgpu_device *adev = handle;

200 201
	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
202
	amd_powerplay_destroy(adev);
203 204 205
}


206 207
/* Powerplay has no meaningful idle state to report; always "busy". */
static bool pp_is_idle(void *handle)
{
	return false;
}

/* Nothing to wait for; powerplay does not track an idle state. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}

/* Soft reset is a no-op for the powerplay IP block. */
static int pp_sw_reset(void *handle)
{
	return 0;
}

/* Powergating transitions are handled elsewhere; this hook is a no-op. */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

static int pp_suspend(void *handle)
{
229 230
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
231

232
	return hwmgr_suspend(hwmgr);
233 234 235 236
}

static int pp_resume(void *handle)
{
237 238
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
239

240
	return hwmgr_resume(hwmgr);
241 242
}

243 244 245 246 247 248
/* Clockgating is driven through pp_set_clockgating_by_smu; this hook is a no-op. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

249
static const struct amd_ip_funcs pp_ip_funcs = {
250
	.name = "powerplay",
251
	.early_init = pp_early_init,
R
Rex Zhu 已提交
252
	.late_init = pp_late_init,
253 254 255 256
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
257
	.late_fini = pp_late_fini,
258 259 260 261 262
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
263
	.set_clockgating_state = pp_set_clockgating_state,
264 265 266
	.set_powergating_state = pp_set_powergating_state,
};

267 268 269 270 271 272 273 274 275
/* Exported descriptor registering powerplay as the SMC IP block, version 1.0. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

276 277 278 279 280 281 282 283 284 285
/* Firmware loading is handled by the IP-block hooks; nothing to do here. */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

/* No post-firmware-load work is required for powerplay. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

286 287
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
288
	struct pp_hwmgr *hwmgr = handle;
289

290 291
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
292 293 294 295 296 297 298 299 300

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

301 302 303 304 305 306 307 308 309 310 311 312 313
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
314
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
315 316
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
317
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
318 319 320 321 322 323 324 325 326
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
327
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
328 329
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
330
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
331 332 333 334 335 336
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}

337 338 339
static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
340
	struct pp_hwmgr *hwmgr = handle;
341

342 343
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
344

345 346 347
	if (level == hwmgr->dpm_level)
		return 0;

348
	mutex_lock(&hwmgr->smu_lock);
349 350
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
351 352
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);
353

354 355
	return 0;
}
356

357 358 359
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
360
	struct pp_hwmgr *hwmgr = handle;
361
	enum amd_dpm_forced_level level;
362

363 364
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
365

366
	mutex_lock(&hwmgr->smu_lock);
367
	level = hwmgr->dpm_level;
368
	mutex_unlock(&hwmgr->smu_lock);
369
	return level;
370
}
371

372
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
373
{
374
	struct pp_hwmgr *hwmgr = handle;
375
	uint32_t clk = 0;
376

377 378
	if (!hwmgr || !hwmgr->pm_en)
		return 0;
379

380
	if (hwmgr->hwmgr_func->get_sclk == NULL) {
381
		pr_info("%s was not implemented.\n", __func__);
382 383
		return 0;
	}
384
	mutex_lock(&hwmgr->smu_lock);
385
	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
386
	mutex_unlock(&hwmgr->smu_lock);
387
	return clk;
388
}
389

390
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
391
{
392
	struct pp_hwmgr *hwmgr = handle;
393
	uint32_t clk = 0;
394

395 396
	if (!hwmgr || !hwmgr->pm_en)
		return 0;
397

398
	if (hwmgr->hwmgr_func->get_mclk == NULL) {
399
		pr_info("%s was not implemented.\n", __func__);
400 401
		return 0;
	}
402
	mutex_lock(&hwmgr->smu_lock);
403
	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
404
	mutex_unlock(&hwmgr->smu_lock);
405
	return clk;
406
}
407

408
static void pp_dpm_powergate_vce(void *handle, bool gate)
409
{
410
	struct pp_hwmgr *hwmgr = handle;
411

412
	if (!hwmgr || !hwmgr->pm_en)
413
		return;
414

415
	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
416
		pr_info("%s was not implemented.\n", __func__);
417
		return;
418
	}
419
	mutex_lock(&hwmgr->smu_lock);
420
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
421
	mutex_unlock(&hwmgr->smu_lock);
422
}
423

424
static void pp_dpm_powergate_uvd(void *handle, bool gate)
425
{
426
	struct pp_hwmgr *hwmgr = handle;
427

428
	if (!hwmgr || !hwmgr->pm_en)
429
		return;
430

431
	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
432
		pr_info("%s was not implemented.\n", __func__);
433
		return;
434
	}
435
	mutex_lock(&hwmgr->smu_lock);
436
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
437
	mutex_unlock(&hwmgr->smu_lock);
438 439
}

440
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
441
		enum amd_pm_state_type *user_state)
442
{
443
	int ret = 0;
444
	struct pp_hwmgr *hwmgr = handle;
445

446 447
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
448

449 450 451
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);
452

453
	return ret;
454
}
455

456
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
457
{
458
	struct pp_hwmgr *hwmgr = handle;
459
	struct pp_power_state *state;
460
	enum amd_pm_state_type pm_type;
461

462
	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
463 464
		return -EINVAL;

465
	mutex_lock(&hwmgr->smu_lock);
466

467 468 469 470
	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
471
		pm_type = POWER_STATE_TYPE_BATTERY;
472
		break;
473
	case PP_StateUILabel_Balanced:
474
		pm_type = POWER_STATE_TYPE_BALANCED;
475
		break;
476
	case PP_StateUILabel_Performance:
477
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
478
		break;
479
	default:
480
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
481
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
482
		else
483
			pm_type = POWER_STATE_TYPE_DEFAULT;
484
		break;
485
	}
486
	mutex_unlock(&hwmgr->smu_lock);
487 488

	return pm_type;
489
}
490

491
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
492
{
493
	struct pp_hwmgr *hwmgr = handle;
494

495
	if (!hwmgr || !hwmgr->pm_en)
496
		return;
497

498
	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
499
		pr_info("%s was not implemented.\n", __func__);
500
		return;
501
	}
502
	mutex_lock(&hwmgr->smu_lock);
503
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
504
	mutex_unlock(&hwmgr->smu_lock);
505 506
}

507
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
508
{
509
	struct pp_hwmgr *hwmgr = handle;
510
	uint32_t mode = 0;
511

512 513
	if (!hwmgr || !hwmgr->pm_en)
		return 0;
514

515
	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
516
		pr_info("%s was not implemented.\n", __func__);
517 518
		return 0;
	}
519
	mutex_lock(&hwmgr->smu_lock);
520
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
521
	mutex_unlock(&hwmgr->smu_lock);
522
	return mode;
523 524 525 526
}

static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
527
	struct pp_hwmgr *hwmgr = handle;
528
	int ret = 0;
529

530 531
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
532

533
	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
534
		pr_info("%s was not implemented.\n", __func__);
535 536
		return 0;
	}
537
	mutex_lock(&hwmgr->smu_lock);
538
	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
539
	mutex_unlock(&hwmgr->smu_lock);
540
	return ret;
541 542 543 544
}

static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
545
	struct pp_hwmgr *hwmgr = handle;
546
	int ret = 0;
547

548 549
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
550

551
	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
552
		pr_info("%s was not implemented.\n", __func__);
553 554
		return 0;
	}
555

556
	mutex_lock(&hwmgr->smu_lock);
557
	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
558
	mutex_unlock(&hwmgr->smu_lock);
559
	return ret;
560 561
}

562 563
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
564
	struct pp_hwmgr *hwmgr = handle;
565
	int ret = 0;
566

567 568
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
569 570 571 572

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EINVAL;

573
	mutex_lock(&hwmgr->smu_lock);
574
	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
575
	mutex_unlock(&hwmgr->smu_lock);
576
	return ret;
577 578
}

579 580 581
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
582
	struct pp_hwmgr *hwmgr = handle;
583 584
	int i;

585 586
	memset(data, 0, sizeof(*data));

587
	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
588 589
		return -EINVAL;

590
	mutex_lock(&hwmgr->smu_lock);
591

592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613
	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
614
	mutex_unlock(&hwmgr->smu_lock);
615 616 617 618 619
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
620
	struct pp_hwmgr *hwmgr = handle;
621
	int size = 0;
622

623
	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
624 625
		return -EINVAL;

626
	mutex_lock(&hwmgr->smu_lock);
627
	*table = (char *)hwmgr->soft_pp_table;
628
	size = hwmgr->soft_pp_table_size;
629
	mutex_unlock(&hwmgr->smu_lock);
630
	return size;
631 632
}

633 634
static int amd_powerplay_reset(void *handle)
{
635
	struct pp_hwmgr *hwmgr = handle;
636 637
	int ret;

638
	ret = hwmgr_hw_fini(hwmgr);
639 640 641
	if (ret)
		return ret;

642
	ret = hwmgr_hw_init(hwmgr);
643 644 645
	if (ret)
		return ret;

646
	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
647 648
}

649 650
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
651
	struct pp_hwmgr *hwmgr = handle;
652
	int ret = -ENOMEM;
653

654 655
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
656

657
	mutex_lock(&hwmgr->smu_lock);
658
	if (!hwmgr->hardcode_pp_table) {
659 660 661
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
662 663
		if (!hwmgr->hardcode_pp_table)
			goto err;
664
	}
665

666 667 668 669
	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

670 671
	ret = amd_powerplay_reset(handle);
	if (ret)
672
		goto err;
673 674 675 676

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
677
			goto err;
678
	}
679
	mutex_unlock(&hwmgr->smu_lock);
680
	return 0;
681 682 683
err:
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
684 685 686
}

static int pp_dpm_force_clock_level(void *handle,
687
		enum pp_clock_type type, uint32_t mask)
688
{
689
	struct pp_hwmgr *hwmgr = handle;
690
	int ret = 0;
691

692 693
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
694

695
	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
696
		pr_info("%s was not implemented.\n", __func__);
697 698
		return 0;
	}
699
	mutex_lock(&hwmgr->smu_lock);
700 701 702 703
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
	else
		ret = -EINVAL;
704
	mutex_unlock(&hwmgr->smu_lock);
705
	return ret;
706 707 708 709 710
}

static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
711
	struct pp_hwmgr *hwmgr = handle;
712
	int ret = 0;
713

714 715
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
716

717
	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
718
		pr_info("%s was not implemented.\n", __func__);
719 720
		return 0;
	}
721
	mutex_lock(&hwmgr->smu_lock);
722
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
723
	mutex_unlock(&hwmgr->smu_lock);
724
	return ret;
725 726
}

727 728
static int pp_dpm_get_sclk_od(void *handle)
{
729
	struct pp_hwmgr *hwmgr = handle;
730
	int ret = 0;
731

732 733
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
734 735

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
736
		pr_info("%s was not implemented.\n", __func__);
737 738
		return 0;
	}
739
	mutex_lock(&hwmgr->smu_lock);
740
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
741
	mutex_unlock(&hwmgr->smu_lock);
742
	return ret;
743 744 745 746
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
747
	struct pp_hwmgr *hwmgr = handle;
748
	int ret = 0;
749

750 751
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
752 753

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
754
		pr_info("%s was not implemented.\n", __func__);
755 756 757
		return 0;
	}

758
	mutex_lock(&hwmgr->smu_lock);
759
	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
760
	mutex_unlock(&hwmgr->smu_lock);
761
	return ret;
762 763
}

764 765
static int pp_dpm_get_mclk_od(void *handle)
{
766
	struct pp_hwmgr *hwmgr = handle;
767
	int ret = 0;
768

769 770
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
771 772

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
773
		pr_info("%s was not implemented.\n", __func__);
774 775
		return 0;
	}
776
	mutex_lock(&hwmgr->smu_lock);
777
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
778
	mutex_unlock(&hwmgr->smu_lock);
779
	return ret;
780 781 782 783
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
784
	struct pp_hwmgr *hwmgr = handle;
785
	int ret = 0;
786

787 788
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
789 790

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
791
		pr_info("%s was not implemented.\n", __func__);
792 793
		return 0;
	}
794
	mutex_lock(&hwmgr->smu_lock);
795
	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
796
	mutex_unlock(&hwmgr->smu_lock);
797
	return ret;
798 799
}

800 801
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
802
{
803
	struct pp_hwmgr *hwmgr = handle;
804
	int ret = 0;
805

806
	if (!hwmgr || !hwmgr->pm_en || !value)
807 808 809 810 811 812 813 814
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
815
		return 0;
816
	default:
817
		mutex_lock(&hwmgr->smu_lock);
818
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
819
		mutex_unlock(&hwmgr->smu_lock);
820
		return ret;
821 822 823
	}
}

824 825 826
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
827
	struct pp_hwmgr *hwmgr = handle;
828

829
	if (!hwmgr || !hwmgr->pm_en)
830 831
		return NULL;

832
	if (idx < hwmgr->num_vce_state_tables)
833
		return &hwmgr->vce_states[idx];
834 835 836
	return NULL;
}

837 838
static int pp_get_power_profile_mode(void *handle, char *buf)
{
839
	struct pp_hwmgr *hwmgr = handle;
840

841
	if (!hwmgr || !hwmgr->pm_en || !buf)
842 843 844 845 846 847 848 849 850 851 852 853
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
854
	struct pp_hwmgr *hwmgr = handle;
855
	int ret = -EINVAL;
856

857 858
	if (!hwmgr || !hwmgr->pm_en)
		return ret;
859 860 861

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
862
		return ret;
863
	}
864
	mutex_lock(&hwmgr->smu_lock);
865 866
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
867
	mutex_unlock(&hwmgr->smu_lock);
868
	return ret;
869 870
}

871 872
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
873
	struct pp_hwmgr *hwmgr = handle;
874

875
	if (!hwmgr || !hwmgr->pm_en)
876 877 878 879 880 881 882 883 884 885
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

886
static int pp_dpm_switch_power_profile(void *handle,
887
		enum PP_SMC_POWER_PROFILE type, bool en)
888
{
889
	struct pp_hwmgr *hwmgr = handle;
890 891
	long workload;
	uint32_t index;
892

893
	if (!hwmgr || !hwmgr->pm_en)
894 895
		return -EINVAL;

896 897 898 899 900 901 902 903
	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

904
	mutex_lock(&hwmgr->smu_lock);
905 906 907 908 909 910 911 912 913 914 915

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
916 917
	}

918 919
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
920
	mutex_unlock(&hwmgr->smu_lock);
921

922 923 924
	return 0;
}

925 926
static int pp_set_power_limit(void *handle, uint32_t limit)
{
927
	struct pp_hwmgr *hwmgr = handle;
928

929 930
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
931 932 933 934 935 936 937 938 939 940 941 942

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	if (limit > hwmgr->default_power_limit)
		return -EINVAL;

943
	mutex_lock(&hwmgr->smu_lock);
944 945
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
946
	mutex_unlock(&hwmgr->smu_lock);
947
	return 0;
948 949 950 951
}

static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
952
	struct pp_hwmgr *hwmgr = handle;
953

954
	if (!hwmgr || !hwmgr->pm_en ||!limit)
955 956
		return -EINVAL;

957
	mutex_lock(&hwmgr->smu_lock);
958 959 960 961 962 963

	if (default_limit)
		*limit = hwmgr->default_power_limit;
	else
		*limit = hwmgr->power_limit;

964
	mutex_unlock(&hwmgr->smu_lock);
965

966
	return 0;
967 968
}

969
static int pp_display_configuration_change(void *handle,
970
	const struct amd_pp_display_configuration *display_config)
971
{
972
	struct pp_hwmgr *hwmgr = handle;
973

974 975
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
976

977
	mutex_lock(&hwmgr->smu_lock);
978
	phm_store_dal_configuration_data(hwmgr, display_config);
979
	mutex_unlock(&hwmgr->smu_lock);
980 981
	return 0;
}
982

983
static int pp_get_display_power_level(void *handle,
R
Rex Zhu 已提交
984
		struct amd_pp_simple_clock_info *output)
985
{
986
	struct pp_hwmgr *hwmgr = handle;
987
	int ret = 0;
988

989
	if (!hwmgr || !hwmgr->pm_en ||!output)
990
		return -EINVAL;
991

992
	mutex_lock(&hwmgr->smu_lock);
993
	ret = phm_get_dal_power_level(hwmgr, output);
994
	mutex_unlock(&hwmgr->smu_lock);
995
	return ret;
996
}
997

998
static int pp_get_current_clocks(void *handle,
999
		struct amd_pp_clock_info *clocks)
1000
{
1001
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
1002
	struct pp_clock_info hw_clocks;
1003
	struct pp_hwmgr *hwmgr = handle;
1004
	int ret = 0;
1005

1006 1007
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
1008

1009
	mutex_lock(&hwmgr->smu_lock);
1010

1011 1012
	phm_get_dal_power_level(hwmgr, &simple_clocks);

1013 1014 1015 1016 1017 1018 1019 1020
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

1021
	if (ret) {
1022
		pr_info("Error in phm_get_clock_info \n");
1023
		mutex_unlock(&hwmgr->smu_lock);
1024
		return -EINVAL;
1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

1037 1038 1039 1040
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;
1041 1042 1043 1044 1045

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
1046
	mutex_unlock(&hwmgr->smu_lock);
1047 1048 1049
	return 0;
}

1050
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1051
{
1052
	struct pp_hwmgr *hwmgr = handle;
1053
	int ret = 0;
1054

1055 1056
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
1057

1058
	if (clocks == NULL)
1059 1060
		return -EINVAL;

1061
	mutex_lock(&hwmgr->smu_lock);
1062
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
1063
	mutex_unlock(&hwmgr->smu_lock);
1064
	return ret;
1065 1066
}

1067
static int pp_get_clock_by_type_with_latency(void *handle,
1068 1069 1070
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
1071
	struct pp_hwmgr *hwmgr = handle;
1072 1073
	int ret = 0;

1074
	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1075 1076
		return -EINVAL;

1077
	mutex_lock(&hwmgr->smu_lock);
1078
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1079
	mutex_unlock(&hwmgr->smu_lock);
1080 1081 1082
	return ret;
}

1083
static int pp_get_clock_by_type_with_voltage(void *handle,
1084 1085 1086
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
1087
	struct pp_hwmgr *hwmgr = handle;
1088 1089
	int ret = 0;

1090
	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1091 1092
		return -EINVAL;

1093
	mutex_lock(&hwmgr->smu_lock);
1094 1095 1096

	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

1097
	mutex_unlock(&hwmgr->smu_lock);
1098 1099 1100
	return ret;
}

1101
static int pp_set_watermarks_for_clocks_ranges(void *handle,
1102
		void *clock_ranges)
1103
{
1104
	struct pp_hwmgr *hwmgr = handle;
1105 1106
	int ret = 0;

1107
	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1108 1109
		return -EINVAL;

1110
	mutex_lock(&hwmgr->smu_lock);
1111
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1112
			clock_ranges);
1113
	mutex_unlock(&hwmgr->smu_lock);
1114 1115 1116 1117

	return ret;
}

1118
static int pp_display_clock_voltage_request(void *handle,
1119 1120
		struct pp_display_clock_request *clock)
{
1121
	struct pp_hwmgr *hwmgr = handle;
1122 1123
	int ret = 0;

1124
	if (!hwmgr || !hwmgr->pm_en ||!clock)
1125 1126
		return -EINVAL;

1127
	mutex_lock(&hwmgr->smu_lock);
1128
	ret = phm_display_clock_voltage_request(hwmgr, clock);
1129
	mutex_unlock(&hwmgr->smu_lock);
1130 1131 1132 1133

	return ret;
}

1134
static int pp_get_display_mode_validation_clocks(void *handle,
1135
		struct amd_pp_simple_clock_info *clocks)
1136
{
1137
	struct pp_hwmgr *hwmgr = handle;
1138
	int ret = 0;
1139

1140
	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1141
		return -EINVAL;
1142

1143 1144
	clocks->level = PP_DAL_POWERLEVEL_7;

1145
	mutex_lock(&hwmgr->smu_lock);
1146

1147
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1148
		ret = phm_get_max_high_clocks(hwmgr, clocks);
1149

1150
	mutex_unlock(&hwmgr->smu_lock);
1151
	return ret;
1152 1153
}

1154
static int pp_dpm_powergate_mmhub(void *handle)
1155
{
1156
	struct pp_hwmgr *hwmgr = handle;
1157

1158 1159
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
1160

1161
	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1162 1163 1164 1165
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

1166
	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1167 1168
}

1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183
/*
 * Gate or ungate the GFX block. Unlike the mmhub variant this silently
 * succeeds when power management is disabled (callers treat it as optional).
 */
static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (!hwmgr->hwmgr_func->powergate_gfx) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}

1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200
/*
 * Dispatch a powergating request from the amdgpu core to the per-IP
 * handler. Only the GFX path propagates an error code; the remaining
 * blocks are fire-and-forget.
 */
static int pp_set_powergating_by_smu(void *handle,
				uint32_t block_type, bool gate)
{
	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		return 0;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		return 0;
	case AMD_IP_BLOCK_TYPE_GMC:
		pp_dpm_powergate_mmhub(handle);
		return 0;
	case AMD_IP_BLOCK_TYPE_GFX:
		return pp_dpm_powergate_gfx(handle, gate);
	default:
		return 0;
	}
}

1209 1210 1211 1212 1213
static int pp_notify_smu_enable_pwe(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
1214
		return -EINVAL;
1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227

	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

1228
/*
 * Dispatch table wiring the generic amdgpu power-management interface
 * (struct amd_pm_funcs) to this powerplay implementation. Forward-declared
 * near the top of the file so amd_powerplay_create() can reference it.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
};