/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23
#include "pp_debug.h"
24 25 26
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
27
#include <linux/slab.h>
28
#include <linux/firmware.h>
29 30
#include "amd_shared.h"
#include "amd_powerplay.h"
31
#include "power_state.h"
32
#include "amdgpu.h"
R
Rex Zhu 已提交
33
#include "hwmgr.h"
34

R
Rex Zhu 已提交
35

36
static const struct amd_pm_funcs pp_dpm_funcs;
37

38
/*
 * Allocate the pp_hwmgr instance for @adev, seed it from device state and
 * publish it through adev->powerplay (handle + dispatch table).
 * Returns 0 on success or a negative errno.
 */
static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	/* power management is only enabled on bare metal when the
	 * amdgpu_dpm module parameter allows it */
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = adev->powerplay.pp_feature;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}

63

64
static void amd_powerplay_destroy(struct amdgpu_device *adev)
65
{
66
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67

68 69
	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;
70

71 72
	kfree(hwmgr);
	hwmgr = NULL;
73 74
}

75 76 77
static int pp_early_init(void *handle)
{
	int ret;
78
	struct amdgpu_device *adev = handle;
79

80
	ret = amd_powerplay_create(adev);
81

82 83 84
	if (ret != 0)
		return ret;

85
	ret = hwmgr_early_init(adev->powerplay.pp_handle);
86
	if (ret)
87
		return -EINVAL;
88

89
	return 0;
90 91
}

92
static int pp_sw_init(void *handle)
93
{
94 95
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
96 97
	int ret = 0;

98
	ret = hwmgr_sw_init(hwmgr);
99

100
	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
101

102 103
	return ret;
}
104

105 106
/*
 * IP-block sw_fini hook: tear down hwmgr software state and, when the SMU
 * loads the microcode, drop the firmware reference and its buffer object.
 */
static int pp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_sw_fini(hwmgr);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		amdgpu_ucode_fini_bo(adev);
	}

	return 0;
}

static int pp_hw_init(void *handle)
{
123
	int ret = 0;
124 125
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
126

127 128
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);
129

130
	ret = hwmgr_hw_init(hwmgr);
131

132 133
	if (ret)
		pr_err("powerplay hw init failed\n");
134

135
	return ret;
136 137 138 139
}

static int pp_hw_fini(void *handle)
{
140 141
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
142

143
	hwmgr_hw_fini(hwmgr);
144

145 146 147
	return 0;
}

148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178
/*
 * Carve out a GTT buffer of pm.smu_prv_buffer_size bytes for the SMU and
 * report its CPU/GPU addresses via the notify_cac_buffer_info callback.
 * On any failure the buffer is released again and an error is logged.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	/* NOTE(review): r stays -EINVAL when the callback is absent, so the
	 * buffer is freed again in that case too — presumably intentional,
	 * since the buffer is useless if the SMU was never told about it. */
	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}

R
Rex Zhu 已提交
179 180
/*
 * IP-block late_init hook: kick the COMPLETE_INIT task (when pm is enabled)
 * and optionally reserve the SMU private buffer requested via module param.
 */
static int pp_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en) {
		mutex_lock(&hwmgr->smu_lock);
		hwmgr_handle_task(hwmgr,
					AMD_PP_TASK_COMPLETE_INIT, NULL);
		mutex_unlock(&hwmgr->smu_lock);
	}
	/* smu_prv_buffer_size == 0 means no SMU scratch buffer was requested */
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);

	return 0;
}

196 197
/*
 * IP-block late_fini hook: release the SMU private buffer (if any) and
 * destroy the powerplay context.  Mirrors pp_late_init/pp_early_init.
 */
static void pp_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
	amd_powerplay_destroy(adev);
}


206 207
/* IP-block is_idle hook: powerplay reports no idle state; always false. */
static bool pp_is_idle(void *handle)
{
	return false;
}

/* IP-block wait_for_idle hook: nothing to wait for; always succeeds. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}

/* IP-block soft_reset hook: powerplay has no soft-reset path; no-op. */
static int pp_sw_reset(void *handle)
{
	return 0;
}

/*
 * IP-block set_powergating_state hook.  Gating request is forwarded twice:
 * first to the SMU GFXOFF control (when the backend provides one), then to
 * per-CU powergating.  Missing callbacks are treated as success.
 */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->gfx_off_control) {
		/* Enable/disable GFX off through SMU */
		ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
							 state == AMD_PG_STATE_GATE);
		if (ret)
			pr_err("gfx off control failed!\n");
	}

	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return 0;
	}

	/* Enable/disable GFX per cu powergating through SMU */
	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
			state == AMD_PG_STATE_GATE);
}

static int pp_suspend(void *handle)
{
251 252
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
253

254
	return hwmgr_suspend(hwmgr);
255 256 257 258
}

static int pp_resume(void *handle)
{
259 260
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
261

262
	return hwmgr_resume(hwmgr);
263 264
}

265 266 267 268 269 270
/* IP-block set_clockgating_state hook: clock gating is driven elsewhere
 * (see pp_set_clockgating_by_smu); this framework hook is a no-op. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

271
/* Lifecycle callbacks wired into the amdgpu IP-block framework. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

289 290 291 292 293 294 295 296 297
/* SMC IP-block descriptor (v1.0.0) registered with the amdgpu core. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

298 299 300 301 302 303 304 305 306 307
/* amd_pm_funcs load_firmware hook: firmware load is handled elsewhere. */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

/* amd_pm_funcs wait_for_fw_loading_complete hook: nothing to wait on. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

308 309
/* Forward a clock-gating message id to the backend's update hook. */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->update_clock_gatings) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

323 324 325 326 327 328 329 330 331 332 333 334 335
/*
 * Handle entry/exit of UMD profiling pstates.  On entry the current DPM
 * level is saved and GFX clock/power gating are ungated (so profiling sees
 * stable clocks); on exit the saved level is restored (for PROFILE_EXIT)
 * and gating is re-enabled.  @level may be rewritten on exit.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}

359 360 361
/*
 * Force a DPM performance level.  Handles UMD-pstate entry/exit bookkeeping
 * (which may rewrite @level) and then re-adjusts the power state under the
 * smu_lock.  Returns 0, or -EINVAL when pm is unavailable.
 */
static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	/* already at the requested level: nothing to do */
	if (level == hwmgr->dpm_level)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
378

379 380 381
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
382
	struct pp_hwmgr *hwmgr = handle;
383
	enum amd_dpm_forced_level level;
384

385 386
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
387

388
	mutex_lock(&hwmgr->smu_lock);
389
	level = hwmgr->dpm_level;
390
	mutex_unlock(&hwmgr->smu_lock);
391
	return level;
392
}
393

394
/* Query the (low or high) engine clock from the backend; 0 if unavailable. */
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t sclk = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (!hwmgr->hwmgr_func->get_sclk) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	sclk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);

	return sclk;
}
411

412
/* Query the (low or high) memory clock from the backend; 0 if unavailable. */
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t mclk = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (!hwmgr->hwmgr_func->get_mclk) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	mclk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);

	return mclk;
}
429

430
/* Gate or ungate the VCE block through the backend, if supported. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_vce) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}
445

446
/* Gate or ungate the UVD block through the backend, if supported. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_uvd) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

462
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
463
		enum amd_pm_state_type *user_state)
464
{
465
	int ret = 0;
466
	struct pp_hwmgr *hwmgr = handle;
467

468 469
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
470

471 472 473
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);
474

475
	return ret;
476
}
477

478
/*
 * Map the current power state's UI classification onto the generic
 * amd_pm_state_type enum.  Unlabelled boot states map to INTERNAL_BOOT,
 * anything else unlabelled to DEFAULT.
 */
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}
512

513
/* Program the fan control mode via the backend, if it provides one. */
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->set_fan_control_mode) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}

529
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
530
{
531
	struct pp_hwmgr *hwmgr = handle;
532
	uint32_t mode = 0;
533

534 535
	if (!hwmgr || !hwmgr->pm_en)
		return 0;
536

537
	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
538
		pr_info("%s was not implemented.\n", __func__);
539 540
		return 0;
	}
541
	mutex_lock(&hwmgr->smu_lock);
542
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
543
	mutex_unlock(&hwmgr->smu_lock);
544
	return mode;
545 546 547 548
}

/* Set fan speed as a percentage of maximum via the backend. */
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_fan_speed_percent) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}

/* Read fan speed as a percentage of maximum from the backend. */
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->get_fan_speed_percent) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}

584 585
/* Read fan speed in RPM; unlike the percent variant, a missing backend
 * hook is reported as -EINVAL rather than silently succeeding. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->get_fan_speed_rpm)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}

601 602 603
/*
 * Fill @data with the number of power states and each state's generic
 * classification.  @data is zeroed up front so callers see a clean result
 * even on error.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		/* hwmgr->ps is a packed array of ps_size-byte entries, so
		 * index by byte offset rather than pointer arithmetic */
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
642
	struct pp_hwmgr *hwmgr = handle;
643
	int size = 0;
644

645
	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
646 647
		return -EINVAL;

648
	mutex_lock(&hwmgr->smu_lock);
649
	*table = (char *)hwmgr->soft_pp_table;
650
	size = hwmgr->soft_pp_table_size;
651
	mutex_unlock(&hwmgr->smu_lock);
652
	return size;
653 654
}

655 656
/*
 * Full powerplay reset: tear down the hw layer, bring it back up, then
 * rerun the post-init task.  Used after the pp table is replaced.
 */
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

671 672
/*
 * Install a user-supplied pp table.  The soft table is redirected to a
 * lazily-allocated writable copy (hardcode_pp_table), the new contents are
 * copied in, and the hw layer is reset so the new table takes effect.
 * AVFS is disabled afterwards when the backend supports it.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		/* first override: clone the read-only soft table */
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			goto err;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		goto err;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			goto err;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
err:
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

/* Force specific clock levels; only honoured while in manual DPM mode. */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->force_clock_level) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = -EINVAL;
	else
		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
733
	struct pp_hwmgr *hwmgr = handle;
734
	int ret = 0;
735

736 737
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
738

739
	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
740
		pr_info("%s was not implemented.\n", __func__);
741 742
		return 0;
	}
743
	mutex_lock(&hwmgr->smu_lock);
744
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
745
	mutex_unlock(&hwmgr->smu_lock);
746
	return ret;
747 748
}

749 750
static int pp_dpm_get_sclk_od(void *handle)
{
751
	struct pp_hwmgr *hwmgr = handle;
752
	int ret = 0;
753

754 755
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
756 757

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
758
		pr_info("%s was not implemented.\n", __func__);
759 760
		return 0;
	}
761
	mutex_lock(&hwmgr->smu_lock);
762
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
763
	mutex_unlock(&hwmgr->smu_lock);
764
	return ret;
765 766 767 768
}

/* Program the engine-clock overdrive value via the backend. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_sclk_od) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}

786 787
static int pp_dpm_get_mclk_od(void *handle)
{
788
	struct pp_hwmgr *hwmgr = handle;
789
	int ret = 0;
790

791 792
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
793 794

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
795
		pr_info("%s was not implemented.\n", __func__);
796 797
		return 0;
	}
798
	mutex_lock(&hwmgr->smu_lock);
799
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
800
	mutex_unlock(&hwmgr->smu_lock);
801
	return ret;
802 803 804 805
}

/* Program the memory-clock overdrive value via the backend. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_mclk_od) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}

822 823
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
824
{
825
	struct pp_hwmgr *hwmgr = handle;
826
	int ret = 0;
827

828
	if (!hwmgr || !hwmgr->pm_en || !value)
829 830 831 832 833 834 835 836
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
837
		return 0;
838
	default:
839
		mutex_lock(&hwmgr->smu_lock);
840
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
841
		mutex_unlock(&hwmgr->smu_lock);
842
		return ret;
843 844 845
	}
}

846 847 848
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
849
	struct pp_hwmgr *hwmgr = handle;
850

851
	if (!hwmgr || !hwmgr->pm_en)
852 853
		return NULL;

854
	if (idx < hwmgr->num_vce_state_tables)
855
		return &hwmgr->vce_states[idx];
856 857 858
	return NULL;
}

859 860
static int pp_get_power_profile_mode(void *handle, char *buf)
{
861
	struct pp_hwmgr *hwmgr = handle;
862

863
	if (!hwmgr || !hwmgr->pm_en || !buf)
864 865 866 867 868 869 870 871 872 873 874 875
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

/*
 * Apply a custom power-profile mode.  Only honoured in manual DPM mode;
 * otherwise the preset -EINVAL is returned unchanged.
 */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -EINVAL;

	if (!hwmgr || !hwmgr->pm_en)
		return ret;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return ret;
	}
	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

893 894
/* Forward an OverDriveNext dpm-table edit request to the backend. */
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->odn_edit_dpm_table) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

908
/*
 * Enable/disable a workload power profile.  Profiles are tracked as a
 * priority-ordered bitmask; the highest-priority bit still set selects the
 * workload actually programmed.  The backend is only told while DPM is not
 * in manual mode (manual mode owns the profile directly).
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	/* CUSTOM cannot be toggled through this interface */
	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

947 948
/*
 * Program a power cap in the SMU.  A limit of 0 restores the default;
 * limits above the board default are rejected.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	if (limit > hwmgr->default_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

/* Report either the default or the currently programmed power limit. */
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	*limit = default_limit ? hwmgr->default_power_limit
			       : hwmgr->power_limit;
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

991
static int pp_display_configuration_change(void *handle,
992
	const struct amd_pp_display_configuration *display_config)
993
{
994
	struct pp_hwmgr *hwmgr = handle;
995

996 997
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
998

999
	mutex_lock(&hwmgr->smu_lock);
1000
	phm_store_dal_configuration_data(hwmgr, display_config);
1001
	mutex_unlock(&hwmgr->smu_lock);
1002 1003
	return 0;
}
1004

1005
static int pp_get_display_power_level(void *handle,
R
Rex Zhu 已提交
1006
		struct amd_pp_simple_clock_info *output)
1007
{
1008
	struct pp_hwmgr *hwmgr = handle;
1009
	int ret = 0;
1010

1011
	if (!hwmgr || !hwmgr->pm_en ||!output)
1012
		return -EINVAL;
1013

1014
	mutex_lock(&hwmgr->smu_lock);
1015
	ret = phm_get_dal_power_level(hwmgr, output);
1016
	mutex_unlock(&hwmgr->smu_lock);
1017
	return ret;
1018
}
1019

1020
/*
 * Collect the current clock/bandwidth figures for the display driver.
 * Picks the PowerContainment variant of phm_get_clock_info when that cap
 * is enabled, and overrides the self-refresh clocks with the dedicated
 * shallow-sleep query when it succeeds.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks;
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_info("Error in phm_get_clock_info \n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

1069
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1070
{
1071
	struct pp_hwmgr *hwmgr = handle;
1072
	int ret = 0;
1073

1074 1075
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
1076

1077
	if (clocks == NULL)
1078 1079
		return -EINVAL;

1080
	mutex_lock(&hwmgr->smu_lock);
1081
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
1082
	mutex_unlock(&hwmgr->smu_lock);
1083
	return ret;
1084 1085
}

1086
static int pp_get_clock_by_type_with_latency(void *handle,
1087 1088 1089
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
1090
	struct pp_hwmgr *hwmgr = handle;
1091 1092
	int ret = 0;

1093
	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1094 1095
		return -EINVAL;

1096
	mutex_lock(&hwmgr->smu_lock);
1097
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1098
	mutex_unlock(&hwmgr->smu_lock);
1099 1100 1101
	return ret;
}

1102
static int pp_get_clock_by_type_with_voltage(void *handle,
1103 1104 1105
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
1106
	struct pp_hwmgr *hwmgr = handle;
1107 1108
	int ret = 0;

1109
	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1110 1111
		return -EINVAL;

1112
	mutex_lock(&hwmgr->smu_lock);
1113 1114 1115

	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

1116
	mutex_unlock(&hwmgr->smu_lock);
1117 1118 1119
	return ret;
}

1120
static int pp_set_watermarks_for_clocks_ranges(void *handle,
1121 1122
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
1123
	struct pp_hwmgr *hwmgr = handle;
1124 1125
	int ret = 0;

1126
	if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
1127 1128
		return -EINVAL;

1129
	mutex_lock(&hwmgr->smu_lock);
1130 1131
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
			wm_with_clock_ranges);
1132
	mutex_unlock(&hwmgr->smu_lock);
1133 1134 1135 1136

	return ret;
}

1137
static int pp_display_clock_voltage_request(void *handle,
1138 1139
		struct pp_display_clock_request *clock)
{
1140
	struct pp_hwmgr *hwmgr = handle;
1141 1142
	int ret = 0;

1143
	if (!hwmgr || !hwmgr->pm_en ||!clock)
1144 1145
		return -EINVAL;

1146
	mutex_lock(&hwmgr->smu_lock);
1147
	ret = phm_display_clock_voltage_request(hwmgr, clock);
1148
	mutex_unlock(&hwmgr->smu_lock);
1149 1150 1151 1152

	return ret;
}

1153
static int pp_get_display_mode_validation_clocks(void *handle,
1154
		struct amd_pp_simple_clock_info *clocks)
1155
{
1156
	struct pp_hwmgr *hwmgr = handle;
1157
	int ret = 0;
1158

1159
	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1160
		return -EINVAL;
1161

1162
	mutex_lock(&hwmgr->smu_lock);
1163

1164
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1165
		ret = phm_get_max_high_clocks(hwmgr, clocks);
1166

1167
	mutex_unlock(&hwmgr->smu_lock);
1168
	return ret;
1169 1170
}

1171 1172
static int pp_set_mmhub_powergating_by_smu(void *handle)
{
1173
	struct pp_hwmgr *hwmgr = handle;
1174

1175 1176
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;
1177 1178 1179 1180 1181 1182 1183 1184 1185

	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}

1186
/* Powerplay implementation of the generic amd_pm_funcs dispatch table,
 * published via adev->powerplay.pp_funcs in amd_powerplay_create(). */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.powergate_vce = pp_dpm_powergate_vce,
	.powergate_uvd = pp_dpm_powergate_uvd,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
};