/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23
#include "pp_debug.h"
24 25 26
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
27
#include <linux/slab.h>
28 29
#include "amd_shared.h"
#include "amd_powerplay.h"
30
#include "power_state.h"
31
#include "amdgpu.h"
R
Rex Zhu 已提交
32
#include "hwmgr.h"
33

R
Rex Zhu 已提交
34

35
static const struct amd_pm_funcs pp_dpm_funcs;
36

37
static int amd_powerplay_create(struct amdgpu_device *adev)
38
{
39
	struct pp_hwmgr *hwmgr;
40

41
	if (adev == NULL)
42 43
		return -EINVAL;

44 45
	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
46 47
		return -ENOMEM;

48
	hwmgr->adev = adev;
49 50
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
51 52 53 54 55 56 57
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = amdgpu_pp_feature_mask;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
58 59 60
	return 0;
}

61

62
/*
 * Free the pp_hwmgr instance created by amd_powerplay_create(),
 * including any user-hardcoded pp table attached to it.
 */
static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;

	kfree(hwmgr);
	hwmgr = NULL;
}

73 74 75
/*
 * IP-block early_init: create the powerplay instance and run the
 * hwmgr early init. Returns 0 on success or a negative errno.
 */
static int pp_early_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = handle;

	ret = amd_powerplay_create(adev);

	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

90
/* IP-block sw_init: delegate software-side setup to the hwmgr layer. */
static int pp_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = hwmgr_sw_init(hwmgr);

	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");

	return ret;
}
102

103 104
/*
 * IP-block sw_fini: tear down hwmgr software state and, when firmware
 * is loaded through the SMU, release the ucode BO.
 */
static int pp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_sw_fini(hwmgr);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}

/*
 * IP-block hw_init: set up the ucode BO first when the SMU loads
 * firmware (hwmgr_hw_init depends on it), then bring up the hwmgr.
 */
static int pp_hw_init(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);

	ret = hwmgr_hw_init(hwmgr);

	if (ret)
		pr_err("powerplay hw init failed\n");

	return ret;
}

/* IP-block hw_fini: delegate hardware teardown to the hwmgr layer. */
static int pp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_hw_fini(hwmgr);

	return 0;
}

R
Rex Zhu 已提交
143 144
/*
 * IP-block late_init: once everything else is up, let the hwmgr finish
 * its init-complete task. Skipped when dpm is disabled (pm_en false).
 */
static int pp_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en) {
		mutex_lock(&hwmgr->smu_lock);
		hwmgr_handle_task(hwmgr,
					AMD_PP_TASK_COMPLETE_INIT, NULL);
		mutex_unlock(&hwmgr->smu_lock);
	}
	return 0;
}

157 158
/* IP-block late_fini: destroy the powerplay instance itself. */
static void pp_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amd_powerplay_destroy(adev);
}


165 166
/* Stub IP-block callbacks: powerplay has no idle/reset notion of its own. */
static bool pp_is_idle(void *handle)
{
	return false;
}

static int pp_wait_for_idle(void *handle)
{
	return 0;
}

static int pp_sw_reset(void *handle)
{
	return 0;
}

/*
 * IP-block set_powergating_state: forward GFX per-CU powergating
 * requests to the asic backend. Silently succeeds when pm is disabled
 * or the backend lacks the hook.
 */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	/* Enable/disable GFX per cu powergating through SMU */
	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
			state == AMD_PG_STATE_GATE);
}

/* IP-block suspend/resume: delegate directly to the hwmgr layer. */
static int pp_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	return hwmgr_suspend(hwmgr);
}

static int pp_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	return hwmgr_resume(hwmgr);
}

215 216 217 218 219 220
/* Clockgating for powerplay is driven via pp_set_clockgating_by_smu
 * instead; this IP-block hook is intentionally a no-op.
 */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

221
/* amdgpu IP-block callback table for the powerplay (SMC) block. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

239 240 241 242 243 244 245 246 247
/* IP-block version descriptor registered by the amdgpu core. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

248 249 250 251 252 253 254 255 256 257
/* Firmware loading is handled elsewhere; these amd_pm_funcs hooks are
 * no-ops kept to satisfy the interface.
 */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

258 259
/*
 * Forward a clockgating message id to the asic backend via SMU.
 * Returns -EINVAL when pm is disabled, 0 when the backend lacks the hook.
 */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308
/*
 * Enter/exit UMD stable-pstate mode depending on the requested forced
 * level. Entering saves the current level and ungates GFX CG/PG so the
 * profiling clocks stay stable; exiting restores the saved level and
 * re-gates. The CG-before-PG / PG-before-CG ordering mirrors the
 * gate/ungate direction and must be preserved.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			cgs_set_clockgating_state(hwmgr->device,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			cgs_set_clockgating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}

309 310 311
/*
 * Force a dpm performance level. Handles UMD-pstate transitions (which
 * may rewrite @level, e.g. PROFILE_EXIT -> saved level), records the
 * request and kicks a power-state readjust, all under smu_lock.
 */
static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (level == hwmgr->dpm_level)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
328

329 330 331
/*
 * Read the current forced dpm level under smu_lock.
 * NOTE(review): returns -EINVAL through the enum type when pm is
 * disabled — callers are expected to treat negative values as errors.
 */
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	enum amd_dpm_forced_level level;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	level = hwmgr->dpm_level;
	mutex_unlock(&hwmgr->smu_lock);
	return level;
}
343

344
/*
 * Query the engine clock from the asic backend (@low selects the low
 * or high bound). Returns 0 when pm is disabled or unimplemented.
 */
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t clk = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}
361

362
/*
 * Query the memory clock from the asic backend (@low selects the low
 * or high bound). Returns 0 when pm is disabled or unimplemented.
 */
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t clk = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}
379

380
/* Gate/ungate the VCE block via the asic backend, under smu_lock. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}
395

396
/* Gate/ungate the UVD block via the asic backend, under smu_lock. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

412
/*
 * Dispatch a powerplay task (e.g. display config change, state
 * readjust) to the hwmgr event handler under smu_lock.
 * @user_state: optional requested power-state type, may be NULL.
 */
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}
427

428
/*
 * Map the current power state's UI classification label onto the
 * generic amd_pm_state_type. Unlabelled boot states report
 * INTERNAL_BOOT; anything else unlabelled reports DEFAULT.
 * NOTE(review): returns -EINVAL through the enum type on error.
 */
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}
462

463
/* Set the fan control mode (manual/auto) via the asic backend. */
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}

479
/* Query the current fan control mode; 0 when pm disabled/unimplemented. */
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t mode = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return mode;
}

/* Set fan speed as a percentage of maximum via the asic backend. */
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Read fan speed as a percentage of maximum into *speed. */
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

534 535
/* Read fan speed in RPM into *rpm; -EINVAL if the backend lacks it. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

551 552 553
/*
 * Fill *data with the number of power states and each state's generic
 * type, mapped from its UI classification label. States are laid out
 * contiguously at hwmgr->ps with stride hwmgr->ps_size.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

/*
 * Expose the active soft pp table. *table aliases internal storage
 * (no copy; caller must not free). Returns the table size in bytes.
 */
static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;
	int size = 0;

	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	*table = (char *)hwmgr->soft_pp_table;
	size = hwmgr->soft_pp_table_size;
	mutex_unlock(&hwmgr->smu_lock);
	return size;
}

605 606
/*
 * Full powerplay re-init: tear down the hardware state, bring it back
 * up, then replay the init-complete task. Used after a pp table swap.
 */
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

621 622
/*
 * Install a user-supplied pp table (sysfs write path, i.e. untrusted
 * input), then reset powerplay so the new table takes effect and
 * disable AVFS if the backend supports it.
 *
 * Returns 0 on success; -EINVAL for a disabled pm, NULL buffer or an
 * oversized table; otherwise the error from the reset/avfs steps.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	/* hardcode_pp_table is allocated soft_pp_table_size bytes below;
	 * copying a larger user buffer into it would overflow the heap.
	 */
	if (!buf || size > hwmgr->soft_pp_table_size)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		/* Start from a copy of the stock table so partial writes
		 * leave the untouched remainder valid. */
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			goto err;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		goto err;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			goto err;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
err:
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/*
 * Force specific dpm levels (bitmask) for a clock domain. Only allowed
 * while the dpm level is MANUAL; otherwise returns -EINVAL.
 */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
	else
		ret = -EINVAL;
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Print the dpm levels for a clock domain into buf (sysfs read path). */
static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

699 700
/* Read the engine clock overdrive percentage from the asic backend. */
static int pp_dpm_get_sclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Set the engine clock overdrive percentage via the asic backend. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

736 737
/* Read the memory clock overdrive percentage from the asic backend. */
static int pp_dpm_get_mclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Set the memory clock overdrive percentage via the asic backend. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

772 773
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
774
{
775
	struct pp_hwmgr *hwmgr = handle;
776
	int ret = 0;
777

778
	if (!hwmgr || !hwmgr->pm_en || !value)
779 780 781 782 783 784 785 786
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
787
		return 0;
788
	default:
789
		mutex_lock(&hwmgr->smu_lock);
790
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
791
		mutex_unlock(&hwmgr->smu_lock);
792
		return ret;
793 794 795
	}
}

796 797 798
/*
 * Return the idx-th VCE clock state entry, or NULL when pm is disabled
 * or idx is out of range. The returned pointer aliases hwmgr storage.
 */
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return NULL;

	if (idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

809 810
/*
 * Print the available/current power profile modes into buf (sysfs
 * read path). Returns bytes written, or -EINVAL on a disabled pm.
 */
static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !buf)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

/*
 * Program a power profile mode (with optional custom parameters in
 * @input). Only permitted while the dpm level is MANUAL.
 */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -EINVAL;

	if (!hwmgr || !hwmgr->pm_en)
		return ret;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return ret;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

843 844
/* Forward an overdrive (OD) dpm-table edit to the asic backend. */
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

858
/*
 * Enable/disable a workload power profile. Profiles are tracked as a
 * priority bitmask; the highest-priority set bit (fls) selects the
 * workload actually programmed. The profile is only pushed to hardware
 * when the dpm level is not MANUAL (manual mode owns the setting).
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		/* mask may now be empty: fls() == 0, fall back to entry 0 */
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

897 898 899 900 901 902 903
/*
 * Tell the SMU where the CAC buffer lives (split 32-bit virtual and MC
 * addresses plus size), via the backend's notify_cac_buffer_info hook.
 */
static int pp_dpm_notify_smu_memory_info(void *handle,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);

	ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
					virtual_addr_hi, mc_addr_low, mc_addr_hi,
					size);

	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

926 927
/*
 * Set the power cap in the backend. limit == 0 means "restore the
 * default"; raising above the default limit is rejected.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	if (limit > hwmgr->default_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

/* Read the current or default power cap into *limit, under smu_lock. */
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en ||!limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (default_limit)
		*limit = hwmgr->default_power_limit;
	else
		*limit = hwmgr->power_limit;

	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

970
/* Store the new display configuration from DAL/DC, under smu_lock. */
static int pp_display_configuration_change(void *handle,
	const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	phm_store_dal_configuration_data(hwmgr, display_config);
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
983

984
/* Fetch the simple clock info DAL uses for its power level, via phm. */
static int pp_get_display_power_level(void *handle,
		struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!output)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_dal_power_level(hwmgr, output);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
998

999
static int pp_get_current_clocks(void *handle,
1000
		struct amd_pp_clock_info *clocks)
1001 1002 1003
{
	struct amd_pp_simple_clock_info simple_clocks;
	struct pp_clock_info hw_clocks;
1004
	struct pp_hwmgr *hwmgr = handle;
1005
	int ret = 0;
1006

1007 1008
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
1009

1010
	mutex_lock(&hwmgr->smu_lock);
1011

1012 1013
	phm_get_dal_power_level(hwmgr, &simple_clocks);

1014 1015 1016 1017 1018 1019 1020 1021
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

1022
	if (ret) {
1023
		pr_info("Error in phm_get_clock_info \n");
1024
		mutex_unlock(&hwmgr->smu_lock);
1025
		return -EINVAL;
1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
1044
	mutex_unlock(&hwmgr->smu_lock);
1045 1046 1047
	return 0;
}

1048
/* Look up the clock levels for a given clock type, via phm. */
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (clocks == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

1065
/* Look up clock levels annotated with latency info, via phm. */
static int pp_get_clock_by_type_with_latency(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

1081
/* Look up clock levels annotated with voltage info, via phm. */
static int pp_get_clock_by_type_with_voltage(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

1099
/* Program display watermarks for the given clock ranges, via phm. */
static int pp_set_watermarks_for_clocks_ranges(void *handle,
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
			wm_with_clock_ranges);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

1116
/* Satisfy a display clock/voltage request from DC, via phm. */
static int pp_display_clock_voltage_request(void *handle,
		struct pp_display_clock_request *clock)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!clock)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_display_clock_voltage_request(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

1132
/*
 * Report the max clocks DC may use for mode validation. Only filled
 * when DynamicPatchPowerState is supported; otherwise *clocks is left
 * untouched and 0 is returned.
 */
static int pp_get_display_mode_validation_clocks(void *handle,
		struct amd_pp_simple_clock_info *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		ret = phm_get_max_high_clocks(hwmgr, clocks);

	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

1150 1151
/* Ask the SMU to powergate the MMHUB block, when supported. */
static int pp_set_mmhub_powergating_by_smu(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}

1165
/* amd_pm_funcs dispatch table published in adev->powerplay.pp_funcs. */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.powergate_vce = pp_dpm_powergate_vce,
	.powergate_uvd = pp_dpm_powergate_uvd,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
};