/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"

#define PP_DPM_DISABLED 0xCCCC

static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state);

static const struct amd_pm_funcs pp_dpm_funcs;

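/*
 * Sanity check a powerplay handle: -EINVAL if the hwmgr or its SMU manager
 * callbacks are missing, PP_DPM_DISABLED if dpm was not enabled at create
 * time, 0 when the handle is fully usable.
 */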
static inline int pp_check(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
		return -EINVAL;

	if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
		return PP_DPM_DISABLED;

	return 0;
}

static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = amdgpu_pp_feature_mask;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}


static int amd_powerplay_destroy(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;

	kfree(hwmgr);
	hwmgr = NULL;

	return 0;
}

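/*
 * Early init: allocate the hwmgr, publish it as adev->powerplay.pp_handle
 * and let hwmgr_early_init() pick the ASIC specific hwmgr/SMU callbacks.
 */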
static int pp_early_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = handle;

	ret = amd_powerplay_create(adev);

	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

static int pp_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->smu_init == NULL)
			return -EINVAL;

		ret = hwmgr->smumgr_funcs->smu_init(hwmgr);

		pr_debug("amdgpu: powerplay sw initialized\n");
	}

	return ret;
}

static int pp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->smu_fini != NULL)
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
	}
	return 0;
}

static int pp_hw_init(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);

	ret = pp_check(hwmgr);

	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->start_smu == NULL)
			return -EINVAL;

		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
			pr_err("smc start failed\n");
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
			return -EINVAL;
		}
		if (ret == PP_DPM_DISABLED)
			goto exit;
		ret = hwmgr_hw_init(hwmgr);
		if (ret)
			goto exit;
	}
	return ret;
exit:
	hwmgr->pm_en = 0;
	cgs_notify_dpm_enabled(hwmgr->device, false);
	return 0;

}

static int pp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret == 0)
		hwmgr_hw_fini(hwmgr);

	return 0;
}

static int pp_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret == 0)
		pp_dpm_dispatch_tasks(hwmgr,
					AMD_PP_TASK_COMPLETE_INIT, NULL);

	return 0;
}

static void pp_late_fini(void *handle)
{
	amd_powerplay_destroy(handle);
}


static bool pp_is_idle(void *handle)
{
	return false;
}

static int pp_wait_for_idle(void *handle)
{
	return 0;
}

static int pp_sw_reset(void *handle)
{
	return 0;
}

static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	/* Enable/disable GFX per cu powergating through SMU */
	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
			state == AMD_PG_STATE_GATE);
}

static int pp_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret == 0)
		hwmgr_hw_suspend(hwmgr);
	return 0;
}

static int pp_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret;

	ret = pp_check(hwmgr);

	if (ret < 0)
		return ret;

	if (hwmgr->smumgr_funcs->start_smu == NULL)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("smc start failed\n");
		hwmgr->smumgr_funcs->smu_fini(hwmgr);
		return -EINVAL;
	}

	if (ret == PP_DPM_DISABLED)
		return 0;

	return hwmgr_hw_resume(hwmgr);
}

static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = NULL,
	.set_powergating_state = pp_set_powergating_state,
};

const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

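/*
 * Entering a profiling ("UMD pstate") level saves the current level and
 * ungates GFX clock/power gating; leaving it restores the saved level and
 * re-enables gating.
 */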
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			cgs_set_clockgating_state(hwmgr->device,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			cgs_set_clockgating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}

static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (level == hwmgr->dpm_level)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	enum amd_dpm_forced_level level;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	level = hwmgr->dpm_level;
	mutex_unlock(&hwmgr->smu_lock);
	return level;
}

static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	uint32_t clk = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}

static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	uint32_t clk = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}

static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;
	struct pp_hwmgr *hwmgr = handle;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

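/*
 * Map the UI label of the current power state onto the generic
 * amd_pm_state_type values exposed to the rest of the driver.
 */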
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	int ret = 0;
	enum amd_pm_state_type pm_type;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->current_ps == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}

static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}

static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	uint32_t mode = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return mode;
}

static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;
	int ret = 0;

	memset(data, 0, sizeof(*data));

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->ps == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	int size = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (!hwmgr->soft_pp_table)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	*table = (char *)hwmgr->soft_pp_table;
	size = hwmgr->soft_pp_table_size;
	mutex_unlock(&hwmgr->smu_lock);
	return size;
}

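/*
 * Re-initialize the power hardware (hw_fini followed by hw_init) and replay
 * the COMPLETE_INIT task so dependent state is rebuilt.
 */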
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	ret = pp_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

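/*
 * Install a caller supplied powerplay table: copy it into a kmemdup'ed
 * shadow of the soft table, reset the hardware so the new table is parsed,
 * and disable AVFS when the hwmgr exposes an avfs_control hook.
 */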
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table) {
			mutex_unlock(&hwmgr->smu_lock);
			return -ENOMEM;
		}
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
	mutex_unlock(&hwmgr->smu_lock);

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			return ret;
	}

	return 0;
}

static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
	else
		ret = -EINVAL;
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_sclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_mclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

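/*
 * Stable-pstate clocks are cached in the hwmgr and answered directly; every
 * other sensor query is forwarded to the ASIC specific read_sensor callback
 * under the smu lock.
 */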
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (value == NULL)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
		return 0;
	default:
		mutex_lock(&hwmgr->smu_lock);
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
		mutex_unlock(&hwmgr->smu_lock);
		return ret;
	}
}

static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return NULL;

	if (hwmgr && idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!buf || pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -EINVAL;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

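/*
 * Set or clear one workload type in the workload mask and hand the highest
 * priority workload that remains to the SMU, unless the level is forced to
 * manual.
 */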
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

static int pp_dpm_notify_smu_memory_info(void *handle,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);

	ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
					virtual_addr_hi, mc_addr_low, mc_addr_hi,
					size);

	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

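/*
 * Program a new power limit: 0 selects the default limit, values above the
 * default are rejected with -EINVAL.
 */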
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	if (limit > hwmgr->default_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (limit == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (default_limit)
		*limit = hwmgr->default_power_limit;
	else
		*limit = hwmgr->power_limit;

	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_display_configuration_change(void *handle,
	const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	phm_store_dal_configuration_data(hwmgr, display_config);
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_get_display_power_level(void *handle,
		struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (output == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_dal_power_level(hwmgr, output);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks;
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_info("Error in phm_get_clock_info \n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (clocks == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_clock_by_type_with_latency(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_clock_by_type_with_voltage(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_set_watermarks_for_clocks_ranges(void *handle,
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (!wm_with_clock_ranges)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
			wm_with_clock_ranges);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_display_clock_voltage_request(void *handle,
		struct pp_display_clock_request *clock)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (!clock)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_display_clock_voltage_request(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_get_display_mode_validation_clocks(void *handle,
		struct amd_pp_simple_clock_info *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (clocks == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		ret = phm_get_max_high_clocks(hwmgr, clocks);

	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_set_mmhub_powergating_by_smu(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}

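/*
 * Dispatch table exported through adev->powerplay.pp_funcs; the entries
 * after the "export to DC" marker are also used by the display core.
 */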
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.powergate_vce = pp_dpm_powergate_vce,
	.powergate_uvd = pp_dpm_powergate_uvd,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
};