amd_powerplay.c 29.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23
#include "pp_debug.h"
24 25 26
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
27
#include <linux/slab.h>
28
#include <linux/firmware.h>
29 30
#include "amd_shared.h"
#include "amd_powerplay.h"
31
#include "power_state.h"
32
#include "amdgpu.h"
R
Rex Zhu 已提交
33
#include "hwmgr.h"
34

R
Rex Zhu 已提交
35

36
static const struct amd_pm_funcs pp_dpm_funcs;
37

38
/*
 * Allocate and initialize the powerplay hwmgr instance for @adev and
 * publish it through adev->powerplay (handle + function table).
 *
 * Returns 0 on success, -EINVAL for a NULL device, -ENOMEM if the
 * hwmgr allocation fails.
 */
static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	/* dpm is only enabled on bare metal (not SR-IOV VF) and when amdgpu_dpm is set */
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = amdgpu_pp_feature_mask;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}

63

64
static void amd_powerplay_destroy(struct amdgpu_device *adev)
65
{
66
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67

68 69
	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;
70

71 72
	kfree(hwmgr);
	hwmgr = NULL;
73 74
}

75 76 77
/*
 * amd_ip_funcs.early_init: create the hwmgr and run its early init.
 * Note: hwmgr_early_init failures are collapsed to -EINVAL rather than
 * propagated.
 */
static int pp_early_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = handle;

	ret = amd_powerplay_create(adev);

	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

92
/* amd_ip_funcs.sw_init: software-side init of the hwmgr (tables, smumgr). */
static int pp_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = hwmgr_sw_init(hwmgr);

	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");

	return ret;
}
104

105 106
/*
 * amd_ip_funcs.sw_fini: undo pp_sw_init.  When the SMU loaded the
 * firmware we also drop our firmware reference and free the ucode BO.
 */
static int pp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_sw_fini(hwmgr);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		amdgpu_ucode_fini_bo(adev);
	}

	return 0;
}

/*
 * amd_ip_funcs.hw_init: set up the ucode BO when the SMU handles
 * firmware loading, then bring up the hardware through the hwmgr.
 */
static int pp_hw_init(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);

	ret = hwmgr_hw_init(hwmgr);

	if (ret)
		pr_err("powerplay hw init failed\n");

	return ret;
}

/* amd_ip_funcs.hw_fini: tear down the hardware state; always succeeds. */
static int pp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_hw_fini(hwmgr);

	return 0;
}

148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178
/*
 * Reserve a GTT buffer of adev->pm.smu_prv_buffer_size bytes for the SMU
 * and hand its CPU/GPU addresses to the SMU via notify_cac_buffer_info.
 * On any failure the buffer is released again; errors are logged but not
 * propagated (best-effort).
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	/* r stays -EINVAL when the callback is absent, so the buffer is
	 * freed again in that case too */
	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}

R
Rex Zhu 已提交
179 180
/*
 * amd_ip_funcs.late_init: kick the COMPLETE_INIT task once everything is
 * up, and reserve the SMU private buffer if one was requested.
 */
static int pp_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en) {
		mutex_lock(&hwmgr->smu_lock);
		hwmgr_handle_task(hwmgr,
					AMD_PP_TASK_COMPLETE_INIT, NULL);
		mutex_unlock(&hwmgr->smu_lock);
	}
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);

	return 0;
}

195 196
/*
 * amd_ip_funcs.late_fini: release the SMU private buffer (if reserved by
 * pp_late_init) and destroy the hwmgr.
 */
static void pp_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
	amd_powerplay_destroy(adev);
}


205 206
/* amd_ip_funcs.is_idle stub: powerplay never reports idle. */
static bool pp_is_idle(void *handle)
{
	return false;
}

/* amd_ip_funcs.wait_for_idle stub: nothing to wait on. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}

/* amd_ip_funcs.soft_reset stub: powerplay has no soft-reset path. */
static int pp_sw_reset(void *handle)
{
	return 0;
}

/*
 * amd_ip_funcs.set_powergating_state: forward GFXOFF and per-CU power
 * gating requests to the SMU.  A GFXOFF failure is only logged; the
 * per-CU request still proceeds.
 */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->gfx_off_control) {
		/* Enable/disable GFX off through SMU */
		ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
							 state == AMD_PG_STATE_GATE);
		if (ret)
			pr_err("gfx off control failed!\n");
	}

	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	/* Enable/disable GFX per cu powergating through SMU */
	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
			state == AMD_PG_STATE_GATE);
}

static int pp_suspend(void *handle)
{
250 251
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
252

253
	return hwmgr_suspend(hwmgr);
254 255 256 257
}

static int pp_resume(void *handle)
{
258 259
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
260

261
	return hwmgr_resume(hwmgr);
262 263
}

264 265 266 267 268 269
/* amd_ip_funcs.set_clockgating_state stub: CG is driven elsewhere. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

270
/* IP-block callback table wiring powerplay into the amdgpu device lifecycle. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

288 289 290 291 292 293 294 295 296
/* Exported SMC IP block descriptor (v1.0.0) registered by the amdgpu core. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

297 298 299 300 301 302 303 304 305 306
/* amd_pm_funcs.load_firmware stub: firmware loading handled in hw_init. */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

/* amd_pm_funcs.wait_for_fw_loading_complete stub: nothing to wait on. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

307 308
/*
 * Forward a clock-gating message to the SMU via the ASIC-specific
 * update_clock_gatings hook; returns 0 (with a log) when the hook is
 * absent and -EINVAL without an initialized, pm-enabled hwmgr.
 */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->update_clock_gatings) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

322 323 324 325 326 327 328 329 330 331 332 333 334
/*
 * Manage entry/exit of the UMD stable pstate.  Entering a profiling level
 * saves the current dpm level and ungates GFX CG/PG (so clocks are stable
 * for profiling); leaving restores the saved level (for PROFILE_EXIT) and
 * re-gates CG/PG.  @level may be rewritten on exit.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}

358 359 360
/*
 * Force a dpm performance level.  Handles UMD-pstate transitions (which
 * may rewrite @level), records the requested level and triggers a power
 * state re-adjust under the smu lock.  No-op if already at @level.
 */
static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (level == hwmgr->dpm_level)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
377

378 379 380
/*
 * Return the current dpm forced level.  NOTE(review): -EINVAL is returned
 * through the enum return type when pm is disabled; callers must treat
 * negative values as errors.
 */
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	enum amd_dpm_forced_level level;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	level = hwmgr->dpm_level;
	mutex_unlock(&hwmgr->smu_lock);
	return level;
}
392

393
/*
 * Query the current (or, with @low, the minimum) engine clock from the
 * ASIC backend.  Returns 0 when pm is disabled or the hook is missing.
 */
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t sclk = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (!hwmgr->hwmgr_func->get_sclk) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	sclk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);

	return sclk;
}
410

411
/*
 * Query the current (or, with @low, the minimum) memory clock from the
 * ASIC backend.  Returns 0 when pm is disabled or the hook is missing.
 */
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t mclk = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (!hwmgr->hwmgr_func->get_mclk) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	mclk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);

	return mclk;
}
428

429
/* Gate/ungate the VCE block through the ASIC backend (best-effort, void). */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}
444

445
/* Gate/ungate the UVD block through the ASIC backend (best-effort, void). */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

461
/*
 * Dispatch a powerplay task (e.g. display config change, readjust power
 * state) to the event handler under the smu lock.
 */
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}
476

477
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
478
{
479
	struct pp_hwmgr *hwmgr = handle;
480
	struct pp_power_state *state;
481
	enum amd_pm_state_type pm_type;
482

483
	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
484 485
		return -EINVAL;

486
	mutex_lock(&hwmgr->smu_lock);
487

488 489 490 491
	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
492
		pm_type = POWER_STATE_TYPE_BATTERY;
493
		break;
494
	case PP_StateUILabel_Balanced:
495
		pm_type = POWER_STATE_TYPE_BALANCED;
496
		break;
497
	case PP_StateUILabel_Performance:
498
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
499
		break;
500
	default:
501
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
502
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
503
		else
504
			pm_type = POWER_STATE_TYPE_DEFAULT;
505
		break;
506
	}
507
	mutex_unlock(&hwmgr->smu_lock);
508 509

	return pm_type;
510
}
511

512
/* Set the fan control mode (auto/manual/...) via the ASIC backend. */
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}

528
/* Get the current fan control mode; 0 when pm is off or unimplemented. */
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t mode = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return mode;
}

/* Set fan speed as a percentage of maximum via the ASIC backend. */
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Read the fan speed as a percentage of maximum into *speed. */
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

583 584
/*
 * Read the fan speed in RPM into *rpm.  Unlike the percent variant this
 * returns -EINVAL (silently) when the hook is unimplemented.
 */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

600 601 602
/*
 * Fill @data with the number and UI classification of all power states.
 * The state array is indexed with a manual stride (hwmgr->ps_size) since
 * the power-state size is ASIC dependent.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

/*
 * Expose the active soft pp table.  *table aliases internal storage (no
 * copy); the return value is the table size in bytes, or -EINVAL.
 */
static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;
	int size = 0;

	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	*table = (char *)hwmgr->soft_pp_table;
	size = hwmgr->soft_pp_table_size;
	mutex_unlock(&hwmgr->smu_lock);
	return size;
}

654 655
/*
 * Full hw re-init cycle (fini + init + COMPLETE_INIT task), used after
 * the pp table is replaced.  Caller must hold smu_lock.
 */
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

670 671
/*
 * Install a user-supplied pp table: copy @buf over a writable duplicate
 * of the soft table, point soft_pp_table at it, then re-init the hw and
 * disable AVFS (overridden tables invalidate the fused AVFS data).
 * NOTE(review): @size is trusted to fit the duplicated table — callers
 * must bound it to soft_pp_table_size.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			goto err;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		goto err;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			goto err;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
err:
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/*
 * Force a subset of dpm levels for a given clock domain; only permitted
 * while in MANUAL dpm level (returns -EINVAL otherwise).
 */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
	else
		ret = -EINVAL;
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Format the dpm levels of @type into @buf (sysfs); returns bytes written. */
static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

748 749
static int pp_dpm_get_sclk_od(void *handle)
{
750
	struct pp_hwmgr *hwmgr = handle;
751
	int ret = 0;
752

753 754
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
755 756

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
757
		pr_info("%s was not implemented.\n", __func__);
758 759
		return 0;
	}
760
	mutex_lock(&hwmgr->smu_lock);
761
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
762
	mutex_unlock(&hwmgr->smu_lock);
763
	return ret;
764 765 766 767
}

/* Apply an engine-clock overdrive percentage through the ASIC backend. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int result = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_sclk_od) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	result = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);

	return result;
}

785 786
static int pp_dpm_get_mclk_od(void *handle)
{
787
	struct pp_hwmgr *hwmgr = handle;
788
	int ret = 0;
789

790 791
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
792 793

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
794
		pr_info("%s was not implemented.\n", __func__);
795 796
		return 0;
	}
797
	mutex_lock(&hwmgr->smu_lock);
798
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
799
	mutex_unlock(&hwmgr->smu_lock);
800
	return ret;
801 802 803 804
}

/* Apply a memory-clock overdrive percentage through the ASIC backend. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int result = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_mclk_od) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	result = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);

	return result;
}

821 822
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
823
{
824
	struct pp_hwmgr *hwmgr = handle;
825
	int ret = 0;
826

827
	if (!hwmgr || !hwmgr->pm_en || !value)
828 829 830 831 832 833 834 835
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
836
		return 0;
837
	default:
838
		mutex_lock(&hwmgr->smu_lock);
839
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
840
		mutex_unlock(&hwmgr->smu_lock);
841
		return ret;
842 843 844
	}
}

845 846 847
/*
 * Return the VCE clock state table entry at @idx, or NULL when pm is
 * disabled or @idx is out of range.
 */
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return NULL;

	if (idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

858 859
static int pp_get_power_profile_mode(void *handle, char *buf)
{
860
	struct pp_hwmgr *hwmgr = handle;
861

862
	if (!hwmgr || !hwmgr->pm_en || !buf)
863 864 865 866 867 868 869 870 871 872 873 874
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

/*
 * Program a custom power-profile mode; only honored while in MANUAL dpm
 * level (silently returns -EINVAL otherwise).
 */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -EINVAL;

	if (!hwmgr || !hwmgr->pm_en)
		return ret;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return ret;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

892 893
/* Edit an OverDriveN dpm table entry through the ASIC backend. */
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->odn_edit_dpm_table) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

907
/*
 * Enable/disable a workload power profile.  Profiles are prioritized:
 * the highest-priority enabled bit in workload_mask selects the active
 * workload setting, which is applied unless the user forced MANUAL.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		/* fls() == 0 means no profile left enabled; fall back to slot 0 */
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

946 947
/*
 * Set the power cap in watts.  0 selects the default limit; values above
 * the default are rejected (no over-cap support here).
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	if (limit > hwmgr->default_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

/* Report the current (or, with @default_limit, the default) power cap. */
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en ||!limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (default_limit)
		*limit = hwmgr->default_power_limit;
	else
		*limit = hwmgr->power_limit;

	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

990
/* DC hook: store the new display configuration in the hwmgr. */
static int pp_display_configuration_change(void *handle,
	const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	phm_store_dal_configuration_data(hwmgr, display_config);
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
1003

1004
/* DC hook: report the simple clock info for the current display level. */
static int pp_get_display_power_level(void *handle,
		struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!output)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_dal_power_level(hwmgr, output);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
1018

1019
static int pp_get_current_clocks(void *handle,
1020
		struct amd_pp_clock_info *clocks)
1021 1022 1023
{
	struct amd_pp_simple_clock_info simple_clocks;
	struct pp_clock_info hw_clocks;
1024
	struct pp_hwmgr *hwmgr = handle;
1025
	int ret = 0;
1026

1027 1028
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
1029

1030
	mutex_lock(&hwmgr->smu_lock);
1031

1032 1033
	phm_get_dal_power_level(hwmgr, &simple_clocks);

1034 1035 1036 1037 1038 1039 1040 1041
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

1042
	if (ret) {
1043
		pr_info("Error in phm_get_clock_info \n");
1044
		mutex_unlock(&hwmgr->smu_lock);
1045
		return -EINVAL;
1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
1064
	mutex_unlock(&hwmgr->smu_lock);
1065 1066 1067
	return 0;
}

1068
/* DC hook: enumerate the clock levels available for @type. */
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (clocks == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

1085
/* DC hook: clock levels for @type annotated with transition latency. */
static int pp_get_clock_by_type_with_latency(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

1101
/* DC hook: clock levels for @type annotated with required voltage. */
static int pp_get_clock_by_type_with_voltage(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

1119
/* DC hook: program display watermarks for the given clock ranges. */
static int pp_set_watermarks_for_clocks_ranges(void *handle,
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
			wm_with_clock_ranges);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

1136
/* DC hook: request a specific display clock (and implied voltage). */
static int pp_display_clock_voltage_request(void *handle,
		struct pp_display_clock_request *clock)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!clock)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_display_clock_voltage_request(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

1152
/*
 * DC hook: report max clocks for display mode validation.
 * NOTE(review): *clocks is left untouched (ret stays 0) when the
 * DynamicPatchPowerState cap is disabled — callers must pre-initialize.
 */
static int pp_get_display_mode_validation_clocks(void *handle,
		struct amd_pp_simple_clock_info *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en ||!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		ret = phm_get_max_high_clocks(hwmgr, clocks);

	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

1170 1171
static int pp_set_mmhub_powergating_by_smu(void *handle)
{
1172
	struct pp_hwmgr *hwmgr = handle;
1173

1174 1175
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
1176 1177 1178 1179 1180 1181 1182 1183 1184

	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}

1185
/*
 * Public powerplay dispatch table published via adev->powerplay.pp_funcs.
 * The first group is used by the amdgpu pm core/sysfs; the second group
 * ("export to DC") is consumed by the display core.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.powergate_vce = pp_dpm_powergate_vce,
	.powergate_uvd = pp_dpm_powergate_uvd,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
};